author     Jakub Jelinek <jakub@redhat.com>   2007-07-12 18:26:36 +0000
committer  Jakub Jelinek <jakub@redhat.com>   2007-07-12 18:26:36 +0000
commit     0ecb606cb6cf65de1d9fc8a919bceb4be476c602
tree       2ea1f8305970753e4a657acb2ccc15ca3eec8e2c  /sysdeps/sparc/sparc64
parent     7d58530341304d403a6626d7f7a1913165fe2f32
2.5-18.1
Diffstat (limited to 'sysdeps/sparc/sparc64')
31 files changed, 1532 insertions, 440 deletions
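Most of the changes in this diff are mechanical (copyright-year updates, weak_alias spacing, soft-fp conversions to the RAW/SEMIRAW pack and unpack macros), but two are worth calling out before reading the patch: the sparc64 bits/atomic.h compare-and-exchange and swap inline assembly gains a "memory" clobber so the compiler treats the instructions as compiler barriers, and the lazy-binding trampolines move out of the TRAMPOLINE_TEMPLATE asm in dl-machine.h into the new dl-trampoline.S. The sketch below is a minimal, hypothetical illustration of the 64-bit clobber change; the function name and exact constraints are illustrative, not the glibc __arch_compare_and_exchange_val_64_acq macro itself.

/* Hypothetical sketch of a sparc64 CASX-based compare-and-exchange.
   Without the "memory" clobber this patch adds, the compiler could keep
   unrelated memory values cached in registers across the atomic op.  */
#include <stdint.h>

static inline uint64_t
casx_acq (uint64_t *mem, uint64_t oldval, uint64_t newval)
{
  uint64_t ret = newval;                   /* casx writes the old memory value here */
  __asm__ __volatile__ ("casx [%2], %3, %0"
                        : "+r" (ret), "+m" (*mem)
                        : "r" (mem), "r" (oldval)
                        : "memory");       /* the clobber added by this patch */
  return ret;                              /* equals oldval iff the store happened */
}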
diff --git a/sysdeps/sparc/sparc64/Dist b/sysdeps/sparc/sparc64/Dist deleted file mode 100644 index dbf11d5..0000000 --- a/sysdeps/sparc/sparc64/Dist +++ /dev/null @@ -1 +0,0 @@ -hp-timing.c diff --git a/sysdeps/sparc/sparc64/Makefile b/sysdeps/sparc/sparc64/Makefile index 6335984..c1df317 100644 --- a/sysdeps/sparc/sparc64/Makefile +++ b/sysdeps/sparc/sparc64/Makefile @@ -1,6 +1,3 @@ -# The Sparc `long double' is a distinct type we support. -long-double-fcts = yes - ifeq ($(subdir),csu) sysdep_routines += hp-timing elide-routines.os += hp-timing diff --git a/sysdeps/sparc/sparc64/Versions b/sysdeps/sparc/sparc64/Versions index 99fe741..4cef7bc 100644 --- a/sysdeps/sparc/sparc64/Versions +++ b/sysdeps/sparc/sparc64/Versions @@ -5,3 +5,10 @@ libc { __align_cpy_8; __align_cpy_16; } } +libm { + GLIBC_2.1 { + # A generic bug got this omitted from other configurations' version + # sets, but we always had it. + exp2l; + } +} diff --git a/sysdeps/sparc/sparc64/bits/atomic.h b/sysdeps/sparc/sparc64/bits/atomic.h index 8f97753..d0a64af 100644 --- a/sysdeps/sparc/sparc64/bits/atomic.h +++ b/sysdeps/sparc/sparc64/bits/atomic.h @@ -1,5 +1,5 @@ /* Atomic operations. sparc64 version. - Copyright (C) 2003 Free Software Foundation, Inc. + Copyright (C) 2003, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Jakub Jelinek <jakub@redhat.com>, 2003. @@ -59,7 +59,7 @@ typedef uintmax_t uatomic_max_t; __asm __volatile ("cas [%4], %2, %0" \ : "=r" (__acev_tmp), "=m" (*__acev_mem) \ : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval)); \ + "0" (newval) : "memory"); \ __acev_tmp; }) #define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ @@ -69,7 +69,7 @@ typedef uintmax_t uatomic_max_t; __asm __volatile ("casx [%4], %2, %0" \ : "=r" (__acev_tmp), "=m" (*__acev_mem) \ : "r" ((long) (oldval)), "m" (*__acev_mem), \ - "r" (__acev_mem), "0" ((long) (newval))); \ + "r" (__acev_mem), "0" ((long) (newval)) : "memory"); \ __acev_tmp; }) #define atomic_exchange_acq(mem, newvalue) \ @@ -80,7 +80,7 @@ typedef uintmax_t uatomic_max_t; if (sizeof (*(mem)) == 4) \ __asm ("swap %0, %1" \ : "=m" (*__memp), "=r" (__oldval) \ - : "m" (*__memp), "1" (__value)); \ + : "m" (*__memp), "1" (__value) : "memory"); \ else \ { \ __val = *__memp; \ @@ -94,6 +94,12 @@ typedef uintmax_t uatomic_max_t; } \ __oldval; }) +#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) + +#define atomic_exchange_24_rel(mem, newval) \ + atomic_exchange_rel (mem, newval) + #define atomic_full_barrier() \ __asm __volatile ("membar #LoadLoad | #LoadStore" \ " | #StoreLoad | #StoreStore" : : : "memory") diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h index 474aa0e..314a784 100644 --- a/sysdeps/sparc/sparc64/dl-machine.h +++ b/sysdeps/sparc/sparc64/dl-machine.h @@ -1,5 +1,5 @@ /* Machine-dependent ELF dynamic relocation inline functions. Sparc64 version. - Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 + Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. @@ -18,6 +18,9 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ +#ifndef dl_machine_h +#define dl_machine_h + #define ELF_MACHINE_NAME "sparc64" #include <string.h> @@ -88,7 +91,7 @@ elf_machine_load_address (void) /* We have 4 cases to handle. 
And we code different code sequences for each one. I love V9 code models... */ -static inline void +static inline void __attribute__ ((always_inline)) sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc, Elf64_Addr *reloc_addr, Elf64_Addr value, Elf64_Addr high, int t) @@ -212,7 +215,7 @@ sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc, } } -static inline Elf64_Addr +static inline Elf64_Addr __attribute__ ((always_inline)) elf_machine_fixup_plt (struct link_map *map, lookup_t t, const Elf64_Rela *reloc, Elf64_Addr *reloc_addr, Elf64_Addr value) @@ -233,236 +236,21 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc, return value; } -#ifdef RESOLVE - -/* Perform the relocation specified by RELOC and SYM (which is fully resolved). - MAP is the object containing the reloc. */ - -static inline void -elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc, - const Elf64_Sym *sym, const struct r_found_version *version, - void *const reloc_addr_arg) -{ - Elf64_Addr *const reloc_addr = reloc_addr_arg; - const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info); - -#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC - if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0)) - *reloc_addr = map->l_addr + reloc->r_addend; -# ifndef RTLD_BOOTSTRAP - else if (r_type == R_SPARC_NONE) /* Who is Wilbur? */ - return; -# endif - else -#endif - { -#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP - const Elf64_Sym *const refsym = sym; -#endif - Elf64_Addr value; -#ifndef RESOLVE_CONFLICT_FIND_MAP - if (sym->st_shndx != SHN_UNDEF && - ELF64_ST_BIND (sym->st_info) == STB_LOCAL) - value = map->l_addr; - else - { - value = RESOLVE (&sym, version, r_type); - if (sym) - value += sym->st_value; - } -#else - value = 0; -#endif - value += reloc->r_addend; /* Assume copy relocs have zero addend. */ - - switch (r_type) - { -#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP - case R_SPARC_COPY: - if (sym == NULL) - /* This can happen in trace mode if an object could not be - found. 
*/ - break; - if (sym->st_size > refsym->st_size - || (GLRO(dl_verbose) && sym->st_size < refsym->st_size)) - { - const char *strtab; - - strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]); - _dl_error_printf ("\ -%s: Symbol `%s' has different size in shared object, consider re-linking\n", - rtld_progname ?: "<program name unknown>", - strtab + refsym->st_name); - } - memcpy (reloc_addr_arg, (void *) value, - MIN (sym->st_size, refsym->st_size)); - break; -#endif - case R_SPARC_64: - case R_SPARC_GLOB_DAT: - *reloc_addr = value; - break; -#ifndef RTLD_BOOTSTRAP - case R_SPARC_8: - *(char *) reloc_addr = value; - break; - case R_SPARC_16: - *(short *) reloc_addr = value; - break; - case R_SPARC_32: - *(unsigned int *) reloc_addr = value; - break; - case R_SPARC_DISP8: - *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr); - break; - case R_SPARC_DISP16: - *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr); - break; - case R_SPARC_DISP32: - *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr); - break; - case R_SPARC_WDISP30: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & 0xc0000000) | - ((value - (Elf64_Addr) reloc_addr) >> 2)); - break; - - /* MEDLOW code model relocs */ - case R_SPARC_LO10: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & ~0x3ff) | - (value & 0x3ff)); - break; - case R_SPARC_HI22: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & 0xffc00000) | - (value >> 10)); - break; - case R_SPARC_OLO10: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & ~0x1fff) | - (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff)); - break; - - /* MEDMID code model relocs */ - case R_SPARC_H44: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & 0xffc00000) | - (value >> 22)); - break; - case R_SPARC_M44: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & ~0x3ff) | - ((value >> 12) & 0x3ff)); - break; - case R_SPARC_L44: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & ~0xfff) | - (value & 0xfff)); - break; - - /* MEDANY code model relocs */ - case R_SPARC_HH22: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & 0xffc00000) | - (value >> 42)); - break; - case R_SPARC_HM10: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & ~0x3ff) | - ((value >> 32) & 0x3ff)); - break; - case R_SPARC_LM22: - *(unsigned int *) reloc_addr = - ((*(unsigned int *)reloc_addr & 0xffc00000) | - ((value >> 10) & 0x003fffff)); - break; -#endif - case R_SPARC_JMP_SLOT: -#ifdef RESOLVE_CONFLICT_FIND_MAP - /* R_SPARC_JMP_SLOT conflicts against .plt[32768+] - relocs should be turned into R_SPARC_64 relocs - in .gnu.conflict section. - r_addend non-zero does not mean it is a .plt[32768+] - reloc, instead it is the actual address of the function - to call. */ - sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0); -#else - sparc64_fixup_plt (map, reloc, reloc_addr, value, - reloc->r_addend, 0); -#endif - break; -#ifndef RTLD_BOOTSTRAP - case R_SPARC_UA16: - ((unsigned char *) reloc_addr_arg) [0] = value >> 8; - ((unsigned char *) reloc_addr_arg) [1] = value; - break; - case R_SPARC_UA32: - ((unsigned char *) reloc_addr_arg) [0] = value >> 24; - ((unsigned char *) reloc_addr_arg) [1] = value >> 16; - ((unsigned char *) reloc_addr_arg) [2] = value >> 8; - ((unsigned char *) reloc_addr_arg) [3] = value; - break; - case R_SPARC_UA64: - if (! 
((long) reloc_addr_arg & 3)) - { - /* Common in .eh_frame */ - ((unsigned int *) reloc_addr_arg) [0] = value >> 32; - ((unsigned int *) reloc_addr_arg) [1] = value; - break; - } - ((unsigned char *) reloc_addr_arg) [0] = value >> 56; - ((unsigned char *) reloc_addr_arg) [1] = value >> 48; - ((unsigned char *) reloc_addr_arg) [2] = value >> 40; - ((unsigned char *) reloc_addr_arg) [3] = value >> 32; - ((unsigned char *) reloc_addr_arg) [4] = value >> 24; - ((unsigned char *) reloc_addr_arg) [5] = value >> 16; - ((unsigned char *) reloc_addr_arg) [6] = value >> 8; - ((unsigned char *) reloc_addr_arg) [7] = value; - break; -#endif -#if !defined RTLD_BOOTSTRAP || defined _NDEBUG - default: - _dl_reloc_bad_type (map, r_type, 0); - break; -#endif - } - } -} - -static inline void -elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc, - void *const reloc_addr_arg) -{ - Elf64_Addr *const reloc_addr = reloc_addr_arg; - *reloc_addr = l_addr + reloc->r_addend; -} - -static inline void -elf_machine_lazy_rel (struct link_map *map, - Elf64_Addr l_addr, const Elf64_Rela *reloc) -{ - switch (ELF64_R_TYPE (reloc->r_info)) - { - case R_SPARC_NONE: - break; - case R_SPARC_JMP_SLOT: - break; - default: - _dl_reloc_bad_type (map, ELFW(R_TYPE) (reloc->r_info), 1); - break; - } -} - -#endif /* RESOLVE */ - /* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so PLT entries should not be allowed to define the value. ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one of the main executable's symbols, as for a COPY reloc. */ -#define elf_machine_type_class(type) \ +#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD) +# define elf_machine_type_class(type) \ + ((((type) == R_SPARC_JMP_SLOT \ + || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64)) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY)) +#else +# define elf_machine_type_class(type) \ ((((type) == R_SPARC_JMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY)) +#endif /* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */ #define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT @@ -487,74 +275,67 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile) extern void _dl_runtime_profile_1 (void); Elf64_Addr res0_addr, res1_addr; unsigned int *plt = (void *) D_PTR (l, l_info[DT_PLTGOT]); - int i = 0; - if (! 
profile) - { - res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0; - res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1; - } - else + if (__builtin_expect(profile, 0)) { res0_addr = (Elf64_Addr) &_dl_runtime_profile_0; res1_addr = (Elf64_Addr) &_dl_runtime_profile_1; - if (_dl_name_match_p (GLRO(dl_profile), l)) + + if (GLRO(dl_profile) != NULL + && _dl_name_match_p (GLRO(dl_profile), l)) GL(dl_profile_map) = l; } + else + { + res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0; + res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1; + } /* PLT0 looks like: - save %sp, -192, %sp - sethi %hh(_dl_runtime_{resolve,profile}_0), %l0 - sethi %lm(_dl_runtime_{resolve,profile}_0), %l1 - or %l0, %hm(_dl_runtime_{resolve,profile}_0), %l0 - or %l1, %lo(_dl_runtime_{resolve,profile}_0), %l1 - sllx %l0, 32, %l0 - jmpl %l0 + %l1, %l6 - sethi %hi(0xffc00), %l2 + sethi %uhi(_dl_runtime_{resolve,profile}_0), %g4 + sethi %hi(_dl_runtime_{resolve,profile}_0), %g5 + or %g4, %ulo(_dl_runtime_{resolve,profile}_0), %g4 + or %g5, %lo(_dl_runtime_{resolve,profile}_0), %g5 + sllx %g4, 32, %g4 + add %g4, %g5, %g5 + jmpl %g5, %g4 + nop */ - plt[0] = 0x9de3bf40; - plt[1] = 0x21000000 | (res0_addr >> (64 - 22)); - plt[2] = 0x23000000 | ((res0_addr >> 10) & 0x003fffff); - plt[3] = 0xa0142000 | ((res0_addr >> 32) & 0x3ff); - plt[4] = 0xa2146000 | (res0_addr & 0x3ff); - plt[5] = 0xa12c3020; - plt[6] = 0xadc40011; - plt[7] = 0x250003ff; + plt[0] = 0x09000000 | (res0_addr >> (64 - 22)); + plt[1] = 0x0b000000 | ((res0_addr >> 10) & 0x003fffff); + plt[2] = 0x88112000 | ((res0_addr >> 32) & 0x3ff); + plt[3] = 0x8a116000 | (res0_addr & 0x3ff); + plt[4] = 0x89293020; + plt[5] = 0x8a010005; + plt[6] = 0x89c14000; + plt[7] = 0x01000000; /* PLT1 looks like: - save %sp, -192, %sp - sethi %hh(_dl_runtime_{resolve,profile}_1), %l0 - sethi %lm(_dl_runtime_{resolve,profile}_1), %l1 - or %l0, %hm(_dl_runtime_{resolve,profile}_1), %l0 - or %l1, %lo(_dl_runtime_{resolve,profile}_1), %l1 - sllx %l0, 32, %l0 - jmpl %l0 + %l1, %l6 - srlx %g1, 12, %o1 + sethi %uhi(_dl_runtime_{resolve,profile}_1), %g4 + sethi %hi(_dl_runtime_{resolve,profile}_1), %g5 + or %g4, %ulo(_dl_runtime_{resolve,profile}_1), %g4 + or %g5, %lo(_dl_runtime_{resolve,profile}_1), %g5 + sllx %g4, 32, %g4 + add %g4, %g5, %g5 + jmpl %g5, %g4 + nop */ - plt[8 + 0] = 0x9de3bf40; - if (__builtin_expect (((res1_addr + 4) >> 32) & 0x3ff, 0)) - i = 1; - else - res1_addr += 4; - plt[8 + 1] = 0x21000000 | (res1_addr >> (64 - 22)); - plt[8 + 2] = 0x23000000 | ((res1_addr >> 10) & 0x003fffff); - if (__builtin_expect (i, 0)) - plt[8 + 3] = 0xa0142000 | ((res1_addr >> 32) & 0x3ff); - else - plt[8 + 3] = 0xa12c3020; - plt[8 + 4] = 0xa2146000 | (res1_addr & 0x3ff); - if (__builtin_expect (i, 0)) - plt[8 + 5] = 0xa12c3020; - plt[8 + 5 + i] = 0xadc40011; - plt[8 + 6 + i] = 0x9330700c; + plt[8] = 0x09000000 | (res1_addr >> (64 - 22)); + plt[9] = 0x0b000000 | ((res1_addr >> 10) & 0x003fffff); + plt[10] = 0x88112000 | ((res1_addr >> 32) & 0x3ff); + plt[11] = 0x8a116000 | (res1_addr & 0x3ff); + plt[12] = 0x89293020; + plt[13] = 0x8a010005; + plt[14] = 0x89c14000; + plt[15] = 0x01000000; /* Now put the magic cookie at the beginning of .PLT2 Entry .PLT3 is unused by this implementation. 
*/ - *((struct link_map **)(&plt[16 + 0])) = l; + *((struct link_map **)(&plt[16])) = l; if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0) || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0)) @@ -601,68 +382,6 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile) return lazy; } -/* This code is used in dl-runtime.c to call the `fixup' function - and then redirect to the address it returns. */ -#define TRAMPOLINE_TEMPLATE(tramp_name, fixup_name) \ - asm ("\n" \ -" .text\n" \ -" .globl " #tramp_name "_0\n" \ -" .type " #tramp_name "_0, @function\n" \ -" .align 32\n" \ -"\t" #tramp_name "_0:\n" \ -" ! sethi %hi(1047552), %l2 - Done in .PLT0\n" \ -" ldx [%l6 + 32 + 8], %o0\n" \ -" sub %g1, %l6, %l0\n" \ -" xor %l2, -1016, %l2\n" \ -" sethi %hi(5120), %l3 ! 160 * 32\n" \ -" add %l0, %l2, %l0\n" \ -" sethi %hi(32768), %l4\n" \ -" udivx %l0, %l3, %l3\n" \ -" sllx %l3, 2, %l1\n" \ -" add %l1, %l3, %l1\n" \ -" sllx %l1, 10, %l2\n" \ -" sub %l4, 4, %l4 ! No thanks to Sun for not obeying their own ABI\n" \ -" sllx %l1, 5, %l1\n" \ -" sub %l0, %l2, %l0\n" \ -" udivx %l0, 24, %l0\n" \ -" add %l0, %l4, %l0\n" \ -" add %l1, %l0, %l1\n" \ -" add %l1, %l1, %l0\n" \ -" add %l0, %l1, %l0\n" \ -" mov %i7, %o2\n" \ -" call " #fixup_name "\n" \ -" sllx %l0, 3, %o1\n" \ -" jmp %o0\n" \ -" restore\n" \ -" .size " #tramp_name "_0, . - " #tramp_name "_0\n" \ -"\n" \ -" .globl " #tramp_name "_1\n" \ -" .type " #tramp_name "_1, @function\n" \ -" ! tramp_name_1 + 4 needs to be .align 32\n" \ -"\t" #tramp_name "_1:\n" \ -" sub %l6, 4, %l6\n" \ -" ! srlx %g1, 12, %o1 - Done in .PLT1\n" \ -" ldx [%l6 + 12], %o0\n" \ -" add %o1, %o1, %o3\n" \ -" sub %o1, 96, %o1 ! No thanks to Sun for not obeying their own ABI\n" \ -" mov %i7, %o2\n" \ -" call " #fixup_name "\n" \ -" add %o1, %o3, %o1\n" \ -" jmp %o0\n" \ -" restore\n" \ -" .size " #tramp_name "_1, . - " #tramp_name "_1\n" \ -" .previous\n"); - -#ifndef PROF -#define ELF_MACHINE_RUNTIME_TRAMPOLINE \ - TRAMPOLINE_TEMPLATE (_dl_runtime_resolve, fixup); \ - TRAMPOLINE_TEMPLATE (_dl_runtime_profile, profile_fixup); -#else -#define ELF_MACHINE_RUNTIME_TRAMPOLINE \ - TRAMPOLINE_TEMPLATE (_dl_runtime_resolve, fixup); \ - TRAMPOLINE_TEMPLATE (_dl_runtime_profile, fixup); -#endif - /* The PLT uses Elf64_Rela relocs. */ #define elf_machine_relplt elf_machine_rela @@ -763,3 +482,291 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile) " add %sp, 6*8, %sp\n" \ " .size _dl_start_user, . - _dl_start_user\n" \ " .previous\n"); + +#endif /* dl_machine_h */ + +#define ARCH_LA_PLTENTER sparc64_gnu_pltenter +#define ARCH_LA_PLTEXIT sparc64_gnu_pltexit + +#ifdef RESOLVE_MAP + +/* Perform the relocation specified by RELOC and SYM (which is fully resolved). + MAP is the object containing the reloc. 
*/ + +auto inline void +__attribute__ ((always_inline)) +elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc, + const Elf64_Sym *sym, const struct r_found_version *version, + void *const reloc_addr_arg) +{ + Elf64_Addr *const reloc_addr = reloc_addr_arg; +#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP + const Elf64_Sym *const refsym = sym; +#endif + Elf64_Addr value; + const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info); +#if !defined RESOLVE_CONFLICT_FIND_MAP + struct link_map *sym_map = NULL; +#endif + +#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC + /* This is defined in rtld.c, but nowhere in the static libc.a; make the + reference weak so static programs can still link. This declaration + cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP) + because rtld.c contains the common defn for _dl_rtld_map, which is + incompatible with a weak decl in the same file. */ + weak_extern (_dl_rtld_map); +#endif + + if (__builtin_expect (r_type == R_SPARC_NONE, 0)) + return; + +#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC + if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0)) + { +# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC + if (map != &_dl_rtld_map) /* Already done in rtld itself. */ +# endif + *reloc_addr += map->l_addr + reloc->r_addend; + return; + } +#endif + +#ifndef RESOLVE_CONFLICT_FIND_MAP + if (__builtin_expect (ELF64_ST_BIND (sym->st_info) == STB_LOCAL, 0) + && sym->st_shndx != SHN_UNDEF) + { + value = map->l_addr; + } + else + { + sym_map = RESOLVE_MAP (&sym, version, r_type); + value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value; + } +#else + value = 0; +#endif + + value += reloc->r_addend; /* Assume copy relocs have zero addend. */ + + switch (r_type) + { +#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP + case R_SPARC_COPY: + if (sym == NULL) + /* This can happen in trace mode if an object could not be + found. */ + break; + if (sym->st_size > refsym->st_size + || (GLRO(dl_verbose) && sym->st_size < refsym->st_size)) + { + const char *strtab; + + strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]); + _dl_error_printf ("\ +%s: Symbol `%s' has different size in shared object, consider re-linking\n", + rtld_progname ?: "<program name unknown>", + strtab + refsym->st_name); + } + memcpy (reloc_addr_arg, (void *) value, + MIN (sym->st_size, refsym->st_size)); + break; +#endif + case R_SPARC_64: + case R_SPARC_GLOB_DAT: + *reloc_addr = value; + break; + case R_SPARC_JMP_SLOT: +#ifdef RESOLVE_CONFLICT_FIND_MAP + /* R_SPARC_JMP_SLOT conflicts against .plt[32768+] + relocs should be turned into R_SPARC_64 relocs + in .gnu.conflict section. + r_addend non-zero does not mean it is a .plt[32768+] + reloc, instead it is the actual address of the function + to call. */ + sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0); +#else + sparc64_fixup_plt (map, reloc, reloc_addr, value, reloc->r_addend, 0); +#endif + break; +#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD) \ + && !defined RESOLVE_CONFLICT_FIND_MAP + case R_SPARC_TLS_DTPMOD64: + /* Get the information from the link map returned by the + resolv function. */ + if (sym_map != NULL) + *reloc_addr = sym_map->l_tls_modid; + break; + case R_SPARC_TLS_DTPOFF64: + /* During relocation all TLS symbols are defined and used. + Therefore the offset is already correct. */ + *reloc_addr = (sym == NULL ? 
0 : sym->st_value) + reloc->r_addend; + break; + case R_SPARC_TLS_TPOFF64: + /* The offset is negative, forward from the thread pointer. */ + /* We know the offset of object the symbol is contained in. + It is a negative value which will be added to the + thread pointer. */ + if (sym != NULL) + { + CHECK_STATIC_TLS (map, sym_map); + *reloc_addr = sym->st_value - sym_map->l_tls_offset + + reloc->r_addend; + } + break; +# ifndef RTLD_BOOTSTRAP + case R_SPARC_TLS_LE_HIX22: + case R_SPARC_TLS_LE_LOX10: + if (sym != NULL) + { + CHECK_STATIC_TLS (map, sym_map); + value = sym->st_value - sym_map->l_tls_offset + + reloc->r_addend; + if (r_type == R_SPARC_TLS_LE_HIX22) + *reloc_addr = (*reloc_addr & 0xffc00000) + | (((~value) >> 10) & 0x3fffff); + else + *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff) + | 0x1c00; + } + break; +# endif +#endif +#ifndef RTLD_BOOTSTRAP + case R_SPARC_8: + *(char *) reloc_addr = value; + break; + case R_SPARC_16: + *(short *) reloc_addr = value; + break; + case R_SPARC_32: + *(unsigned int *) reloc_addr = value; + break; + case R_SPARC_DISP8: + *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr); + break; + case R_SPARC_DISP16: + *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr); + break; + case R_SPARC_DISP32: + *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr); + break; + case R_SPARC_WDISP30: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & 0xc0000000) | + (((value - (Elf64_Addr) reloc_addr) >> 2) & 0x3fffffff)); + break; + + /* MEDLOW code model relocs */ + case R_SPARC_LO10: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0x3ff) | + (value & 0x3ff)); + break; + case R_SPARC_HI22: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & 0xffc00000) | + ((value >> 10) & 0x3fffff)); + break; + case R_SPARC_OLO10: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0x1fff) | + (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff)); + break; + + /* MEDMID code model relocs */ + case R_SPARC_H44: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & 0xffc00000) | + ((value >> 22) & 0x3fffff)); + break; + case R_SPARC_M44: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0x3ff) | + ((value >> 12) & 0x3ff)); + break; + case R_SPARC_L44: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0xfff) | + (value & 0xfff)); + break; + + /* MEDANY code model relocs */ + case R_SPARC_HH22: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & 0xffc00000) | + (value >> 42)); + break; + case R_SPARC_HM10: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0x3ff) | + ((value >> 32) & 0x3ff)); + break; + case R_SPARC_LM22: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & 0xffc00000) | + ((value >> 10) & 0x003fffff)); + break; + case R_SPARC_UA16: + ((unsigned char *) reloc_addr_arg) [0] = value >> 8; + ((unsigned char *) reloc_addr_arg) [1] = value; + break; + case R_SPARC_UA32: + ((unsigned char *) reloc_addr_arg) [0] = value >> 24; + ((unsigned char *) reloc_addr_arg) [1] = value >> 16; + ((unsigned char *) reloc_addr_arg) [2] = value >> 8; + ((unsigned char *) reloc_addr_arg) [3] = value; + break; + case R_SPARC_UA64: + if (! 
((long) reloc_addr_arg & 3)) + { + /* Common in .eh_frame */ + ((unsigned int *) reloc_addr_arg) [0] = value >> 32; + ((unsigned int *) reloc_addr_arg) [1] = value; + break; + } + ((unsigned char *) reloc_addr_arg) [0] = value >> 56; + ((unsigned char *) reloc_addr_arg) [1] = value >> 48; + ((unsigned char *) reloc_addr_arg) [2] = value >> 40; + ((unsigned char *) reloc_addr_arg) [3] = value >> 32; + ((unsigned char *) reloc_addr_arg) [4] = value >> 24; + ((unsigned char *) reloc_addr_arg) [5] = value >> 16; + ((unsigned char *) reloc_addr_arg) [6] = value >> 8; + ((unsigned char *) reloc_addr_arg) [7] = value; + break; +#endif +#if !defined RTLD_BOOTSTRAP || defined _NDEBUG + default: + _dl_reloc_bad_type (map, r_type, 0); + break; +#endif + } +} + +auto inline void +__attribute__ ((always_inline)) +elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc, + void *const reloc_addr_arg) +{ + Elf64_Addr *const reloc_addr = reloc_addr_arg; + *reloc_addr = l_addr + reloc->r_addend; +} + +auto inline void +__attribute__ ((always_inline)) +elf_machine_lazy_rel (struct link_map *map, + Elf64_Addr l_addr, const Elf64_Rela *reloc) +{ + switch (ELF64_R_TYPE (reloc->r_info)) + { + case R_SPARC_NONE: + break; + case R_SPARC_JMP_SLOT: + break; + default: + _dl_reloc_bad_type (map, ELFW(R_TYPE) (reloc->r_info), 1); + break; + } +} + +#endif /* RESOLVE_MAP */ diff --git a/sysdeps/sparc/sparc64/dl-trampoline.S b/sysdeps/sparc/sparc64/dl-trampoline.S new file mode 100644 index 0000000..81e5c55 --- /dev/null +++ b/sysdeps/sparc/sparc64/dl-trampoline.S @@ -0,0 +1,327 @@ +/* PLT trampolines. Sparc 64-bit version. + Copyright (C) 2005 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. 
*/ + +#include <sysdep.h> + + .text + .align 32 + + /* %g1: PLT offset loaded by PLT entry + * %g4: callers PC, which is PLT0 + 24, therefore we + * add (32 + 8) to get the address of PLT2 which + * is where the magic cookie is stored + */ + .globl _dl_runtime_resolve_0 + .type _dl_runtime_resolve_0, @function +_dl_runtime_resolve_0: + cfi_startproc + + save %sp, -192, %sp + cfi_def_cfa_register(%fp) + cfi_window_save + cfi_register(%o7, %i7) + + sethi %hi(1047552), %l2 + ldx [%g4 + 32 + 8], %o0 + sub %g1, %g4, %l0 + xor %l2, -1016, %l2 + sethi %hi(5120), %l3 /* 160 * 32 */ + add %l0, %l2, %l0 + sethi %hi(32768), %l4 + udivx %l0, %l3, %l3 + sllx %l3, 2, %l1 + add %l1, %l3, %l1 + sllx %l1, 10, %l2 + sub %l4, 4, %l4 + sllx %l1, 5, %l1 + sub %l0, %l2, %l0 + udivx %l0, 24, %l0 + add %l0, %l4, %l0 + add %l1, %l0, %l1 + add %l1, %l1, %l0 + add %l0, %l1, %l0 + call _dl_fixup + sllx %l0, 3, %o1 + jmp %o0 + restore + + cfi_endproc + + .size _dl_runtime_resolve_0, .-_dl_runtime_resolve_0 + + /* %g1: PLT offset loaded by PLT entry + * %g4: callers PC, which is PLT1 + 24, therefore we + * add 8 to get the address of PLT2 which + * is where the magic cookie is stored + */ + .globl _dl_runtime_resolve_1 + .type _dl_runtime_resolve_1, @function +_dl_runtime_resolve_1: + cfi_startproc + + save %sp, -192, %sp + cfi_def_cfa_register(%fp) + cfi_window_save + cfi_register(%o7, %i7) + + srlx %g1, 12, %o1 + ldx [%g4 + 8], %o0 + add %o1, %o1, %o3 + sub %o1, 96, %o1 + call _dl_fixup + add %o1, %o3, %o1 + jmp %o0 + restore + + cfi_endproc + + .size _dl_runtime_resolve_1, .-_dl_runtime_resolve_1 + + /* For the profiling cases we pass in our stack frame + * as the base of the La_sparc64_regs, so it looks + * like: + * %l0 %sp + * ... + * %l7 %sp + (7 * 8) + * %i0 %sp + (8 * 8) + * ... + * %i7 %sp + (15 * 8) + * %f0 %sp + (16 * 8) + * %f16 %sp + (31 * 8) + * framesize %sp + (32 * 8) + */ + + .globl _dl_profile_save_regs + .type _dl_profile_save_regs, @function +_dl_profile_save_regs: + cfi_startproc + + stx %l0, [%sp + STACK_BIAS + ( 0 * 8)] + stx %l1, [%sp + STACK_BIAS + ( 1 * 8)] + stx %l2, [%sp + STACK_BIAS + ( 2 * 8)] + stx %l3, [%sp + STACK_BIAS + ( 3 * 8)] + stx %l4, [%sp + STACK_BIAS + ( 4 * 8)] + stx %l5, [%sp + STACK_BIAS + ( 5 * 8)] + stx %l6, [%sp + STACK_BIAS + ( 6 * 8)] + stx %l7, [%sp + STACK_BIAS + ( 7 * 8)] + stx %i0, [%sp + STACK_BIAS + ( 8 * 8)] + stx %i1, [%sp + STACK_BIAS + ( 9 * 8)] + stx %i2, [%sp + STACK_BIAS + (10 * 8)] + stx %i3, [%sp + STACK_BIAS + (11 * 8)] + stx %i4, [%sp + STACK_BIAS + (12 * 8)] + stx %i5, [%sp + STACK_BIAS + (13 * 8)] + stx %i6, [%sp + STACK_BIAS + (14 * 8)] + stx %i7, [%sp + STACK_BIAS + (15 * 8)] + std %f0, [%sp + STACK_BIAS + (16 * 8)] + std %f2, [%sp + STACK_BIAS + (17 * 8)] + std %f4, [%sp + STACK_BIAS + (18 * 8)] + std %f6, [%sp + STACK_BIAS + (19 * 8)] + std %f8, [%sp + STACK_BIAS + (20 * 8)] + std %f10, [%sp + STACK_BIAS + (21 * 8)] + std %f12, [%sp + STACK_BIAS + (22 * 8)] + std %f14, [%sp + STACK_BIAS + (23 * 8)] + std %f16, [%sp + STACK_BIAS + (24 * 8)] + std %f18, [%sp + STACK_BIAS + (25 * 8)] + std %f20, [%sp + STACK_BIAS + (26 * 8)] + std %f22, [%sp + STACK_BIAS + (27 * 8)] + std %f24, [%sp + STACK_BIAS + (28 * 8)] + std %f26, [%sp + STACK_BIAS + (29 * 8)] + std %f28, [%sp + STACK_BIAS + (30 * 8)] + retl + std %f30, [%sp + STACK_BIAS + (31 * 8)] + + cfi_endproc + + .size _dl_profile_save_regs, .-_dl_profile_save_regs + + /* If we are going to call pltexit, then we must replicate + * the caller's stack frame. 
+ * %o0: PLT resolved function address + */ + .globl _dl_profile_invoke + .type _dl_profile_invoke, @function +_dl_profile_invoke: + cfi_startproc + + sub %sp, %l0, %sp +1: + srlx %l0, 3, %l7 + mov %o0, %l1 + mov %i0, %o0 + mov %i1, %o1 + mov %i2, %o2 + mov %i3, %o3 + mov %i4, %o4 + mov %i5, %o5 + add %fp, STACK_BIAS, %l2 + add %sp, STACK_BIAS, %l3 +1: ldx [%l2], %l4 + add %l2, 0x8, %l2 + subcc %l7, 1, %l7 + stx %l4, [%l3] + bne,pt %xcc, 1b + add %l3, 0x8, %l3 + + jmpl %l1, %o7 + nop + + stx %o0, [%sp + STACK_BIAS + (16 * 8)] + stx %o1, [%sp + STACK_BIAS + (17 * 8)] + stx %o2, [%sp + STACK_BIAS + (18 * 8)] + stx %o3, [%sp + STACK_BIAS + (19 * 8)] + std %f0, [%sp + STACK_BIAS + (20 * 8)] + std %f2, [%sp + STACK_BIAS + (21 * 8)] + std %f4, [%sp + STACK_BIAS + (22 * 8)] + std %f8, [%sp + STACK_BIAS + (23 * 8)] + + mov %l5, %o0 + mov %l6, %o1 + add %sp, %l0, %o2 + add %sp, STACK_BIAS + (16 * 8), %o3 + call _dl_call_pltexit + add %o2, STACK_BIAS, %o2 + + ldx [%sp + STACK_BIAS + (16 * 8)], %i0 + ldx [%sp + STACK_BIAS + (17 * 8)], %i1 + ldx [%sp + STACK_BIAS + (18 * 8)], %i2 + ldx [%sp + STACK_BIAS + (19 * 8)], %i3 + + jmpl %i7 + 8, %g0 + restore + + cfi_endproc + + .size _dl_profile_invoke, .-_dl_profile_invoke + + /* %g1: PLT offset loaded by PLT entry + * %g4: callers PC, which is PLT0 + 24, therefore we + * add (32 + 8) to get the address of PLT2 which + * is where the magic cookie is stored + */ + .align 32 + .globl _dl_runtime_profile_0 + .type _dl_runtime_profile_0, @function +_dl_runtime_profile_0: + cfi_startproc + + brz,a,pn %fp, 1f + mov 192, %g5 + sub %fp, %sp, %g5 +1: save %sp, -336, %sp + cfi_def_cfa_register(%fp) + cfi_window_save + cfi_register(%o7, %i7) + + sethi %hi(1047552), %l2 + ldx [%g4 + 32 + 8], %o0 + sub %g1, %g4, %l0 + xor %l2, -1016, %l2 + sethi %hi(5120), %l3 /* 160 * 32 */ + add %l0, %l2, %l0 + sethi %hi(32768), %l4 + udivx %l0, %l3, %l3 + sllx %l3, 2, %l1 + add %l1, %l3, %l1 + sllx %l1, 10, %l2 + sub %l4, 4, %l4 + sllx %l1, 5, %l1 + sub %l0, %l2, %l0 + udivx %l0, 24, %l0 + add %l0, %l4, %l0 + add %l1, %l0, %l1 + add %l1, %l1, %l0 + add %l0, %l1, %l0 + + mov %i7, %o2 + sllx %l0, 3, %o1 + + mov %g5, %l0 + mov %o0, %l5 + mov %o1, %l6 + + call _dl_profile_save_regs + nop + + add %sp, STACK_BIAS, %o3 + call _dl_profile_fixup + add %sp, (STACK_BIAS + (32 * 8)), %o4 + + ldx [%sp + STACK_BIAS + (32 * 8)], %o1 + brgez,pt %o1, 1f + nop + + call _dl_profile_invoke + nop + +1: jmp %o0 + restore + + cfi_endproc + + .size _dl_runtime_profile_0, .-_dl_runtime_profile_0 + + /* %g1: PLT offset loaded by PLT entry + * %g4: callers PC, which is PLT1 + 24, therefore we + * add 8 to get the address of PLT2 which + * is where the magic cookie is stored + */ + .globl _dl_runtime_profile_1 + .type _dl_runtime_profile_1, @function +_dl_runtime_profile_1: + cfi_startproc + + brz,a,pn %fp, 1f + mov 192, %g5 + sub %fp, %sp, %g5 +1: save %sp, -336, %sp + cfi_def_cfa_register(%fp) + cfi_window_save + cfi_register(%o7, %i7) + + srlx %g1, 12, %o1 + ldx [%g4 + 8], %o0 + add %o1, %o1, %o3 + sub %o1, 96, %o1 + mov %i7, %o2 + add %o1, %o3, %o1 + + mov %g5, %l0 + mov %o0, %l5 + mov %o1, %l6 + + call _dl_profile_save_regs + nop + + add %sp, STACK_BIAS, %o3 + call _dl_profile_fixup + add %sp, (STACK_BIAS + (32 * 8)), %o4 + + ldx [%sp + STACK_BIAS + (32 * 8)], %o1 + brgez,pt %o1, 1f + nop + + call _dl_profile_invoke + nop + +1: jmp %o0 + restore + + cfi_endproc + + .size _dl_runtime_resolve_1, .-_dl_runtime_resolve_1 diff --git a/sysdeps/sparc/sparc64/elf/start.S b/sysdeps/sparc/sparc64/elf/start.S index 
b0ef9d3..df44cae 100644 --- a/sysdeps/sparc/sparc64/elf/start.S +++ b/sysdeps/sparc/sparc64/elf/start.S @@ -48,6 +48,7 @@ .global _start .type _start,#function _start: + cfi_startproc #ifdef SHARED sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %l7 @@ -59,6 +60,7 @@ _start: drop their arguments. */ mov %g0, %fp sub %sp, 6*8, %sp + cfi_adjust_cfa_offset(6*8) /* Extract the arguments and environment as encoded on the stack. The argument info starts after one register window (16 words) past the SP, @@ -92,4 +94,12 @@ _start: /* Die very horribly if exit returns. */ illtrap 0 + cfi_endproc + .size _start, .-_start + +/* Define a symbol for the first piece of initialized data. */ + .data + .globl __data_start +__data_start: +weak_alias (__data_start, data_start) diff --git a/sysdeps/sparc/sparc64/fpu/libm-test-ulps b/sysdeps/sparc/sparc64/fpu/libm-test-ulps index 4888dd2..db5543e 100644 --- a/sysdeps/sparc/sparc64/fpu/libm-test-ulps +++ b/sysdeps/sparc/sparc64/fpu/libm-test-ulps @@ -33,14 +33,14 @@ ildouble: 1 ldouble: 1 # cacosh -Test "Real part of: cacosh (-2 - 3 i) == -1.9833870299165354323470769028940395 + 2.1414491111159960199416055713254211 i": +Test "Real part of: cacosh (-2 - 3 i) == 1.9833870299165354323470769028940395 - 2.1414491111159960199416055713254211 i": double: 1 float: 7 idouble: 1 ifloat: 7 ildouble: 5 ldouble: 5 -Test "Imaginary part of: cacosh (-2 - 3 i) == -1.9833870299165354323470769028940395 + 2.1414491111159960199416055713254211 i": +Test "Imaginary part of: cacosh (-2 - 3 i) == 1.9833870299165354323470769028940395 - 2.1414491111159960199416055713254211 i": double: 1 float: 3 idouble: 1 @@ -465,6 +465,11 @@ ifloat: 2 ildouble: 1 ldouble: 1 +# exp2 +Test "exp2 (10) == 1024": +ildouble: 2 +ldouble: 2 + # expm1 Test "expm1 (0.75) == 1.11700001661267466854536981983709561": double: 1 @@ -1192,6 +1197,10 @@ ifloat: 2 ildouble: 1 ldouble: 1 +Function: "exp2": +ildouble: 2 +ldouble: 2 + Function: "expm1": double: 1 float: 1 diff --git a/sysdeps/sparc/sparc64/jmpbuf-unwind.h b/sysdeps/sparc/sparc64/jmpbuf-unwind.h new file mode 100644 index 0000000..f7eed15 --- /dev/null +++ b/sysdeps/sparc/sparc64/jmpbuf-unwind.h @@ -0,0 +1,37 @@ +/* Copyright (C) 2005, 2006 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by David S. Miller <davem@davemloft.net>, 2005. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <setjmp.h> +#include <stdint.h> +#include <unwind.h> + +/* Test if longjmp to JMPBUF would unwind the frame + containing a local variable at ADDRESS. 
*/ +#define _JMPBUF_UNWINDS(jmpbuf, address, demangle) \ + ((unsigned long int) (address) < (jmpbuf)->uc_mcontext.mc_fp + 2047) + +#define _JMPBUF_CFA_UNWINDS_ADJ(_jmpbuf, _context, _adj) \ + _JMPBUF_UNWINDS_ADJ (_jmpbuf, (void *) _Unwind_GetCFA (_context), _adj) + +#define _JMPBUF_UNWINDS_ADJ(_jmpbuf, _address, _adj) \ + ((uintptr_t) (_address) - (_adj) \ + < (uintptr_t) (_jmpbuf)[0].uc_mcontext.mc_fp + 2047 - (_adj)) + +/* We use the normal lobngjmp for unwinding. */ +#define __libc_unwind_longjmp(buf, val) __libc_longjmp (buf, val) diff --git a/sysdeps/sparc/sparc64/memcmp.S b/sysdeps/sparc/sparc64/memcmp.S index 074767b..f9502d6 100644 --- a/sysdeps/sparc/sparc64/memcmp.S +++ b/sysdeps/sparc/sparc64/memcmp.S @@ -139,5 +139,5 @@ ENTRY(memcmp) END(memcmp) #undef bcmp -weak_alias(memcmp, bcmp) +weak_alias (memcmp, bcmp) libc_hidden_builtin_def (memcmp) diff --git a/sysdeps/sparc/sparc64/memcpy.S b/sysdeps/sparc/sparc64/memcpy.S index 3742573..5993358 100644 --- a/sysdeps/sparc/sparc64/memcpy.S +++ b/sysdeps/sparc/sparc64/memcpy.S @@ -209,6 +209,7 @@ ENTRY(bcopy) END(bcopy) .align 32 +ENTRY(__memcpy_large) 200: be,pt %xcc, 201f /* CTI */ andcc %o0, 0x38, %g5 /* IEU1 Group */ mov 8, %g1 /* IEU0 */ @@ -443,6 +444,7 @@ END(bcopy) stb %o5, [%o0 - 1] /* Store */ 209: retl mov %g4, %o0 +END(__memcpy_large) #ifdef USE_BPR @@ -698,6 +700,7 @@ ENTRY(memcpy) END(memcpy) .align 32 +ENTRY(__memmove_slowpath) 228: andcc %o2, 1, %g0 /* IEU1 Group */ be,pt %icc, 2f+4 /* CTI */ 1: ldub [%o1 - 1], %o5 /* LOAD Group */ @@ -718,6 +721,7 @@ END(memcpy) mov %g4, %o0 219: retl nop +END(__memmove_slowpath) .align 32 ENTRY(memmove) @@ -914,8 +918,8 @@ ENTRY(memmove) END(memmove) #ifdef USE_BPR -weak_alias(memcpy, __align_cpy_1) -weak_alias(memcpy, __align_cpy_2) +weak_alias (memcpy, __align_cpy_1) +weak_alias (memcpy, __align_cpy_2) #endif libc_hidden_builtin_def (memcpy) libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/memset.S b/sysdeps/sparc/sparc64/memset.S index b34ccc8..99624ba 100644 --- a/sysdeps/sparc/sparc64/memset.S +++ b/sysdeps/sparc/sparc64/memset.S @@ -312,4 +312,4 @@ ENTRY(__bzero) mov %o5, %o0 END(__bzero) -weak_alias(__bzero, bzero) +weak_alias (__bzero, bzero) diff --git a/sysdeps/sparc/sparc64/soft-fp/Dist b/sysdeps/sparc/sparc64/soft-fp/Dist deleted file mode 100644 index f98d85f..0000000 --- a/sysdeps/sparc/sparc64/soft-fp/Dist +++ /dev/null @@ -1,32 +0,0 @@ -qp_add.c -qp_cmp.c -qp_cmpe.c -qp_div.c -qp_dtoq.c -qp_feq.c -qp_fge.c -qp_fgt.c -qp_fle.c -qp_flt.c -qp_fne.c -qp_itoq.c -qp_mul.c -qp_neg.S -qp_qtod.c -qp_qtoi.c -qp_qtos.c -qp_qtoui.c -qp_qtoux.c -qp_qtox.c -qp_sqrt.c -qp_stoq.c -qp_sub.c -qp_uitoq.c -qp_util.c -qp_uxtoq.c -qp_xtoq.c -s_frexpl.c -s_ilogbl.c -s_scalblnl.c -s_scalbnl.c -sfp-machine.h diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_add.c b/sysdeps/sparc/sparc64/soft-fp/qp_add.c index bcfd0ff..eced23f 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_add.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_add.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (*a) + (*b) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). 
@@ -29,10 +29,10 @@ void _Qp_add(long double *c, const long double *a, const long double *b) FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(C); FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); - FP_UNPACK_QP(B, b); + FP_UNPACK_SEMIRAW_QP(A, a); + FP_UNPACK_SEMIRAW_QP(B, b); FP_ADD_Q(C, A, B); - FP_PACK_QP(c, C); + FP_PACK_SEMIRAW_QP(c, C); QP_HANDLE_EXCEPTIONS(__asm ( " ldd [%1], %%f52\n" " ldd [%1+8], %%f54\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_dtoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_dtoq.c index 9ba2fac..63f3d74 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_dtoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_dtoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -31,13 +31,13 @@ void _Qp_dtoq(long double *c, const double a) FP_DECL_Q(C); FP_INIT_ROUNDMODE; - FP_UNPACK_D(A, a); + FP_UNPACK_RAW_D(A, a); #if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q - FP_CONV(Q,D,4,2,C,A); + FP_EXTEND(Q,D,4,2,C,A); #else - FP_CONV(Q,D,2,1,C,A); + FP_EXTEND(Q,D,2,1,C,A); #endif - FP_PACK_QP(c, C); + FP_PACK_RAW_QP(c, C); QP_HANDLE_EXCEPTIONS(__asm ( " fdtoq %1, %%f60\n" " std %%f60, [%0]\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_itoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_itoq.c index dd28b6a..310d70c 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_itoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_itoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -29,7 +29,7 @@ void _Qp_itoq(long double *c, const int a) FP_DECL_Q(C); int b = a; - FP_FROM_INT_Q(C, b, 32, int); - FP_PACK_QP(c, C); + FP_FROM_INT_Q(C, b, 32, unsigned int); + FP_PACK_RAW_QP(c, C); QP_NO_EXCEPTIONS; } diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtod.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtod.c index c86967a..2e5edad 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtod.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtod.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (double)(*a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -32,13 +32,13 @@ double _Qp_qtod(const long double *a) double r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_SEMIRAW_QP(A, a); #if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q - FP_CONV(D,Q,2,4,R,A); + FP_TRUNC(D,Q,2,4,R,A); #else - FP_CONV(D,Q,1,2,R,A); + FP_TRUNC(D,Q,1,2,R,A); #endif - FP_PACK_D(r, R); + FP_PACK_SEMIRAW_D(r, R); QP_HANDLE_EXCEPTIONS(__asm ( " ldd [%1], %%f52\n" " ldd [%1+8], %%f54\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtoi.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtoi.c index cf8aba7..a402536 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtoi.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtoi.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (int)(*a) - Copyright (C) 1997, 1999, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 1999, 2004, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. 
Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -28,10 +28,10 @@ int _Qp_qtoi(const long double *a) { FP_DECL_EX; FP_DECL_Q(A); - int r; + unsigned int r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_RAW_QP(A, a); FP_TO_INT_Q(r, A, 32, 1); QP_HANDLE_EXCEPTIONS( int rx; diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtos.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtos.c index 52d52d6..f5f9cdb 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtos.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtos.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (float)(*a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -32,13 +32,13 @@ float _Qp_qtos(const long double *a) float r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_SEMIRAW_QP(A, a); #if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q - FP_CONV(S,Q,1,4,R,A); + FP_TRUNC(S,Q,1,4,R,A); #else - FP_CONV(S,Q,1,2,R,A); + FP_TRUNC(S,Q,1,2,R,A); #endif - FP_PACK_S(r, R); + FP_PACK_SEMIRAW_S(r, R); QP_HANDLE_EXCEPTIONS(__asm ( " ldd [%1], %%f52\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtoui.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtoui.c index abee50b..884d23e 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtoui.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtoui.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (unsigned int)(*a) - Copyright (C) 1997, 1999, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 1999, 2004, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -31,7 +31,7 @@ unsigned int _Qp_qtoui(const long double *a) unsigned int r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_RAW_QP(A, a); FP_TO_INT_Q(r, A, 32, -1); QP_HANDLE_EXCEPTIONS( int rx; diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtoux.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtoux.c index 87c8478..4b6f265 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtoux.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtoux.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (unsigned long)(*a) - Copyright (C) 1997, 1999, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 1999, 2004, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -31,7 +31,7 @@ unsigned long _Qp_qtoux(const long double *a) unsigned long r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_RAW_QP(A, a); FP_TO_INT_Q(r, A, 64, -1); QP_HANDLE_EXCEPTIONS( unsigned long rx; diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_qtox.c b/sysdeps/sparc/sparc64/soft-fp/qp_qtox.c index 3548119..fac6166 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_qtox.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_qtox.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. Return (long)(*a) - Copyright (C) 1997, 1999, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 1999, 2004, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). 
@@ -28,10 +28,10 @@ long _Qp_qtox(const long double *a) { FP_DECL_EX; FP_DECL_Q(A); - long r; + unsigned long r; FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); + FP_UNPACK_RAW_QP(A, a); FP_TO_INT_Q(r, A, 64, 1); QP_HANDLE_EXCEPTIONS( long rx; diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_stoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_stoq.c index 6558131..1a4a03d 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_stoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_stoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -31,13 +31,13 @@ void _Qp_stoq(long double *c, const float a) FP_DECL_Q(C); FP_INIT_ROUNDMODE; - FP_UNPACK_S(A, a); + FP_UNPACK_RAW_S(A, a); #if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q - FP_CONV(Q,S,4,1,C,A); + FP_EXTEND(Q,S,4,1,C,A); #else - FP_CONV(Q,S,2,1,C,A); + FP_EXTEND(Q,S,2,1,C,A); #endif - FP_PACK_QP(c, C); + FP_PACK_RAW_QP(c, C); QP_HANDLE_EXCEPTIONS(__asm ( " fstoq %1, %%f60\n" " std %%f60, [%0]\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_sub.c b/sysdeps/sparc/sparc64/soft-fp/qp_sub.c index b1ed15a..056224f 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_sub.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_sub.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (*a) - (*b) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -29,10 +29,10 @@ void _Qp_sub(long double *c, const long double *a, const long double *b) FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(C); FP_INIT_ROUNDMODE; - FP_UNPACK_QP(A, a); - FP_UNPACK_QP(B, b); + FP_UNPACK_SEMIRAW_QP(A, a); + FP_UNPACK_SEMIRAW_QP(B, b); FP_SUB_Q(C, A, B); - FP_PACK_QP(c, C); + FP_PACK_SEMIRAW_QP(c, C); QP_HANDLE_EXCEPTIONS(__asm ( " ldd [%1], %%f52\n" " ldd [%1+8], %%f54\n" diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_uitoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_uitoq.c index d143ba2..a3bfaed 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_uitoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_uitoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -29,7 +29,7 @@ void _Qp_uitoq(long double *c, const unsigned int a) FP_DECL_Q(C); unsigned int b = a; - FP_FROM_INT_Q(C, b, 32, int); - FP_PACK_QP(c, C); + FP_FROM_INT_Q(C, b, 32, unsigned int); + FP_PACK_RAW_QP(c, C); QP_NO_EXCEPTIONS; } diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_uxtoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_uxtoq.c index 3021477..f691e4d 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_uxtoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_uxtoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). 
@@ -29,7 +29,7 @@ void _Qp_uxtoq(long double *c, const unsigned long a) FP_DECL_Q(C); unsigned long b = a; - FP_FROM_INT_Q(C, b, 64, long); - FP_PACK_QP(c, C); + FP_FROM_INT_Q(C, b, 64, unsigned long); + FP_PACK_RAW_QP(c, C); QP_NO_EXCEPTIONS; } diff --git a/sysdeps/sparc/sparc64/soft-fp/qp_xtoq.c b/sysdeps/sparc/sparc64/soft-fp/qp_xtoq.c index 9b4d169..b7a6102 100644 --- a/sysdeps/sparc/sparc64/soft-fp/qp_xtoq.c +++ b/sysdeps/sparc/sparc64/soft-fp/qp_xtoq.c @@ -1,6 +1,6 @@ /* Software floating-point emulation. (*c) = (long double)(*a) - Copyright (C) 1997,1999 Free Software Foundation, Inc. + Copyright (C) 1997,1999,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com) and Jakub Jelinek (jj@ultra.linux.cz). @@ -29,7 +29,7 @@ void _Qp_xtoq(long double *c, const long a) FP_DECL_Q(C); long b = a; - FP_FROM_INT_Q(C, b, 64, long); - FP_PACK_QP(c, C); + FP_FROM_INT_Q(C, b, 64, unsigned long); + FP_PACK_RAW_QP(c, C); QP_NO_EXCEPTIONS; } diff --git a/sysdeps/sparc/sparc64/soft-fp/sfp-machine.h b/sysdeps/sparc/sparc64/soft-fp/sfp-machine.h index 42e6aa8..449e955 100644 --- a/sysdeps/sparc/sparc64/soft-fp/sfp-machine.h +++ b/sysdeps/sparc/sparc64/soft-fp/sfp-machine.h @@ -1,6 +1,6 @@ /* Machine-dependent software floating-point definitions. Sparc64 userland (_Q_* and _Qp_*) version. - Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. + Copyright (C) 1997, 1998, 1999, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Richard Henderson (rth@cygnus.com), Jakub Jelinek (jj@ultra.linux.cz) and @@ -23,6 +23,7 @@ #include <fpu_control.h> #include <fenv.h> +#include <stdlib.h> #define _FP_W_TYPE_SIZE 64 #define _FP_W_TYPE unsigned long diff --git a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S index 8b70b0a..760d526 100644 --- a/sysdeps/sparc/sparc64/sparcv9b/memcpy.S +++ b/sysdeps/sparc/sparc64/sparcv9b/memcpy.S @@ -600,11 +600,11 @@ ENTRY(memmove) END(memmove) #ifdef USE_BPR -weak_alias(memcpy, __align_cpy_1) -weak_alias(memcpy, __align_cpy_2) -weak_alias(memcpy, __align_cpy_4) -weak_alias(memcpy, __align_cpy_8) -weak_alias(memcpy, __align_cpy_16) +weak_alias (memcpy, __align_cpy_1) +weak_alias (memcpy, __align_cpy_2) +weak_alias (memcpy, __align_cpy_4) +weak_alias (memcpy, __align_cpy_8) +weak_alias (memcpy, __align_cpy_16) #endif libc_hidden_builtin_def (memcpy) libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/sparcv9v/memcpy.S b/sysdeps/sparc/sparc64/sparcv9v/memcpy.S new file mode 100644 index 0000000..05c837f --- /dev/null +++ b/sysdeps/sparc/sparc64/sparcv9v/memcpy.S @@ -0,0 +1,593 @@ +/* Copy SIZE bytes from SRC to DEST. For SUN4V Niagara. + Copyright (C) 2006 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by David S. Miller (davem@davemloft.net) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#include <sysdep.h> + +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 +#define ASI_P 0x80 +#define ASI_PNF 0x82 + +#define LOAD(type,addr,dest) type##a [addr] ASI_P, dest +#define LOAD_TWIN(addr_reg,dest0,dest1) \ + ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0 + +#define STORE(type,src,addr) type src, [addr] +#define STORE_INIT(src,addr) stxa src, [addr] %asi + +#ifndef XCC +#define USE_BPR +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + .register %g6,#scratch + + .text + .align 32 + +ENTRY(bcopy) + sub %o1, %o0, %o4 + mov %o0, %g4 + cmp %o4, %o2 + mov %o1, %o0 + bgeu,pt %XCC, 100f + mov %g4, %o1 +#ifndef USE_BPR + srl %o2, 0, %o2 +#endif + brnz,pn %o2, 220f + add %o0, %o2, %o0 + retl + nop +END(bcopy) + + .align 32 +ENTRY(memcpy) +100: /* %o0=dst, %o1=src, %o2=len */ + mov %o0, %g5 + cmp %o2, 0 + be,pn %XCC, 85f +218: or %o0, %o1, %o3 + cmp %o2, 16 + blu,a,pn %XCC, 80f + or %o3, %o2, %o3 + + /* 2 blocks (128 bytes) is the minimum we can do the block + * copy with. We need to ensure that we'll iterate at least + * once in the block copy loop. At worst we'll need to align + * the destination to a 64-byte boundary which can chew up + * to (64 - 1) bytes from the length before we perform the + * block copy loop. + */ + cmp %o2, (2 * 64) + blu,pt %XCC, 70f + andcc %o3, 0x7, %g0 + + /* %o0: dst + * %o1: src + * %o2: len (known to be >= 128) + * + * The block copy loops will use %o4/%o5,%g2/%g3 as + * temporaries while copying the data. + */ + + LOAD(prefetch, %o1, #one_read) + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + + /* Align destination on 64-byte boundary. */ + andcc %o0, (64 - 1), %o4 + be,pt %XCC, 2f + sub %o4, 64, %o4 + sub %g0, %o4, %o4 ! bytes to align dst + sub %o2, %o4, %o2 +1: subcc %o4, 1, %o4 + LOAD(ldub, %o1, %g1) + STORE(stb, %g1, %o0) + add %o1, 1, %o1 + bne,pt %XCC, 1b + add %o0, 1, %o0 + + /* If the source is on a 16-byte boundary we can do + * the direct block copy loop. If it is 8-byte aligned + * we can do the 16-byte loads offset by -8 bytes and the + * init stores offset by one register. + * + * If the source is not even 8-byte aligned, we need to do + * shifting and masking (basically integer faligndata). + * + * The careful bit with init stores is that if we store + * to any part of the cache line we have to store the whole + * cacheline else we can end up with corrupt L2 cache line + * contents. Since the loop works on 64-bytes of 64-byte + * aligned store data at a time, this is easy to ensure. + */ +2: + andcc %o1, (16 - 1), %o4 + andn %o2, (64 - 1), %g1 ! block copy loop iterator + sub %o2, %g1, %o2 ! final sub-block copy bytes + be,pt %XCC, 50f + cmp %o4, 8 + be,a,pt %XCC, 10f + sub %o1, 0x8, %o1 + + /* Neither 8-byte nor 16-byte aligned, shift and mask. 
*/ + mov %g1, %o4 + and %o1, 0x7, %g1 + sll %g1, 3, %g1 + mov 64, %o3 + andn %o1, 0x7, %o1 + LOAD(ldx, %o1, %g2) + sub %o3, %g1, %o3 + sllx %g2, %g1, %g2 + +#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ + LOAD(ldx, SRC, TMP1); \ + srlx TMP1, PRE_SHIFT, TMP2; \ + or TMP2, PRE_VAL, TMP2; \ + STORE_INIT(TMP2, DST); \ + sllx TMP1, POST_SHIFT, PRE_VAL; + +1: add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) + add %o1, 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32 - 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) + subcc %o4, 64, %o4 + bne,pt %XCC, 1b + add %o0, 64, %o0 + +#undef SWIVEL_ONE_DWORD + + srl %g1, 3, %g1 + ba,pt %XCC, 60f + add %o1, %g1, %o1 + +10: /* Destination is 64-byte aligned, source was only 8-byte + * aligned but it has been subtracted by 8 and we perform + * one twin load ahead, then add 8 back into source when + * we finish the loop. + */ + LOAD_TWIN(%o1, %o4, %o5) +1: add %o1, 16, %o1 + LOAD_TWIN(%o1, %g2, %g3) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 + STORE_INIT(%o5, %o0 + 0x00) ! initializes cache line + STORE_INIT(%g2, %o0 + 0x08) + LOAD_TWIN(%o1, %o4, %o5) + add %o1, 16, %o1 + STORE_INIT(%g3, %o0 + 0x10) + STORE_INIT(%o4, %o0 + 0x18) + LOAD_TWIN(%o1, %g2, %g3) + add %o1, 16, %o1 + STORE_INIT(%o5, %o0 + 0x20) + STORE_INIT(%g2, %o0 + 0x28) + LOAD_TWIN(%o1, %o4, %o5) + STORE_INIT(%g3, %o0 + 0x30) + STORE_INIT(%o4, %o0 + 0x38) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + + ba,pt %XCC, 60f + add %o1, 0x8, %o1 + +50: /* Destination is 64-byte aligned, and source is 16-byte + * aligned. + */ +1: LOAD_TWIN(%o1, %o4, %o5) + add %o1, 16, %o1 + LOAD_TWIN(%o1, %g2, %g3) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 + STORE_INIT(%o4, %o0 + 0x00) ! initializes cache line + STORE_INIT(%o5, %o0 + 0x08) + LOAD_TWIN(%o1, %o4, %o5) + add %o1, 16, %o1 + STORE_INIT(%g2, %o0 + 0x10) + STORE_INIT(%g3, %o0 + 0x18) + LOAD_TWIN(%o1, %g2, %g3) + add %o1, 16, %o1 + STORE_INIT(%o4, %o0 + 0x20) + STORE_INIT(%o5, %o0 + 0x28) + STORE_INIT(%g2, %o0 + 0x30) + STORE_INIT(%g3, %o0 + 0x38) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + /* fall through */ + +60: + /* %o2 contains any final bytes still needed to be copied + * over. If anything is left, we copy it one byte at a time. 
+ */ + wr %g0, ASI_PNF, %asi + brz,pt %o2, 85f + sub %o0, %o1, %o3 + ba,a,pt %XCC, 90f + + .align 64 +70: /* 16 < len <= 64 */ + bne,pn %XCC, 75f + sub %o0, %o1, %o3 + +72: + andn %o2, 0xf, %o4 + and %o2, 0xf, %o2 +1: subcc %o4, 0x10, %o4 + LOAD(ldx, %o1, %o5) + add %o1, 0x08, %o1 + LOAD(ldx, %o1, %g1) + sub %o1, 0x08, %o1 + STORE(stx, %o5, %o1 + %o3) + add %o1, 0x8, %o1 + STORE(stx, %g1, %o1 + %o3) + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x8, %o2 + LOAD(ldx, %o1, %o5) + STORE(stx, %o5, %o1 + %o3) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + LOAD(lduw, %o1, %o5) + STORE(stw, %o5, %o1 + %o3) + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, 85f + nop + ba,pt %XCC, 90f + nop + +75: + andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %o2, %g1, %o2 + +1: subcc %g1, 1, %g1 + LOAD(ldub, %o1, %o5) + STORE(stb, %o5, %o1 + %o3) + bgu,pt %icc, 1b + add %o1, 1, %o1 + +2: add %o1, %o3, %o0 + andcc %o1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %o2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %XCC, 73b + +8: mov 64, %o3 + andn %o1, 0x7, %o1 + LOAD(ldx, %o1, %g2) + sub %o3, %g1, %o3 + andn %o2, 0x7, %o4 + sllx %g2, %g1, %g2 +1: add %o1, 0x8, %o1 + LOAD(ldx, %o1, %g3) + subcc %o4, 0x8, %o4 + srlx %g3, %o3, %o5 + or %o5, %g2, %o5 + STORE(stx, %o5, %o0) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %o2, 0x7, %o2 + be,pn %icc, 85f + add %o1, %g1, %o1 + ba,pt %XCC, 90f + sub %o0, %o1, %o3 + + .align 64 +80: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, %o3 + +1: + subcc %o2, 4, %o2 + LOAD(lduw, %o1, %g1) + STORE(stw, %g1, %o1 + %o3) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +85: retl + mov %g5, %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + LOAD(ldub, %o1, %g1) + STORE(stb, %g1, %o1 + %o3) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov %g5, %o0 + +END(memcpy) + +#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \ + ldx [%src - offset - 0x20], %t0; \ + ldx [%src - offset - 0x18], %t1; \ + ldx [%src - offset - 0x10], %t2; \ + ldx [%src - offset - 0x08], %t3; \ + stw %t0, [%dst - offset - 0x1c]; \ + srlx %t0, 32, %t0; \ + stw %t0, [%dst - offset - 0x20]; \ + stw %t1, [%dst - offset - 0x14]; \ + srlx %t1, 32, %t1; \ + stw %t1, [%dst - offset - 0x18]; \ + stw %t2, [%dst - offset - 0x0c]; \ + srlx %t2, 32, %t2; \ + stw %t2, [%dst - offset - 0x10]; \ + stw %t3, [%dst - offset - 0x04]; \ + srlx %t3, 32, %t3; \ + stw %t3, [%dst - offset - 0x08]; + +#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \ + ldx [%src - offset - 0x20], %t0; \ + ldx [%src - offset - 0x18], %t1; \ + ldx [%src - offset - 0x10], %t2; \ + ldx [%src - offset - 0x08], %t3; \ + stx %t0, [%dst - offset - 0x20]; \ + stx %t1, [%dst - offset - 0x18]; \ + stx %t2, [%dst - offset - 0x10]; \ + stx %t3, [%dst - offset - 0x08]; \ + ldx [%src - offset - 0x40], %t0; \ + ldx [%src - offset - 0x38], %t1; \ + ldx [%src - offset - 0x30], %t2; \ + ldx [%src - offset - 0x28], %t3; \ + stx %t0, [%dst - offset - 0x40]; \ + stx %t1, [%dst - offset - 0x38]; \ + stx %t2, [%dst - offset - 0x30]; \ + stx %t3, [%dst - offset - 0x28]; + +#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ + ldx [%src + offset + 0x00], %t0; \ + ldx [%src + offset + 0x08], %t1; \ + stw %t0, [%dst + offset + 0x04]; \ + srlx %t0, 32, %t2; \ + stw %t2, [%dst + offset + 0x00]; \ + stw %t1, [%dst + offset + 0x0c]; \ + srlx %t1, 32, %t3; \ + stw %t3, [%dst + offset + 
0x08]; + +#define RMOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \ + ldx [%src + offset + 0x00], %t0; \ + ldx [%src + offset + 0x08], %t1; \ + stx %t0, [%dst + offset + 0x00]; \ + stx %t1, [%dst + offset + 0x08]; + + .align 32 +228: andcc %o2, 1, %g0 + be,pt %icc, 2f+4 +1: ldub [%o1 - 1], %o5 + sub %o1, 1, %o1 + sub %o0, 1, %o0 + subcc %o2, 1, %o2 + be,pn %xcc, 229f + stb %o5, [%o0] +2: ldub [%o1 - 1], %o5 + sub %o0, 2, %o0 + ldub [%o1 - 2], %g5 + sub %o1, 2, %o1 + subcc %o2, 2, %o2 + stb %o5, [%o0 + 1] + bne,pt %xcc, 2b + stb %g5, [%o0] +229: retl + mov %g4, %o0 +out: retl + mov %g5, %o0 + + .align 32 +ENTRY(memmove) + mov %o0, %g5 +#ifndef USE_BPR + srl %o2, 0, %o2 +#endif + brz,pn %o2, out + sub %o0, %o1, %o4 + cmp %o4, %o2 + bgeu,pt %XCC, 218b + mov %o0, %g4 + add %o0, %o2, %o0 +220: add %o1, %o2, %o1 + cmp %o2, 15 + bleu,pn %xcc, 228b + andcc %o0, 7, %g2 + sub %o0, %o1, %g5 + andcc %g5, 3, %o5 + bne,pn %xcc, 232f + andcc %o1, 3, %g0 + be,a,pt %xcc, 236f + andcc %o1, 4, %g0 + andcc %o1, 1, %g0 + be,pn %xcc, 4f + andcc %o1, 2, %g0 + ldub [%o1 - 1], %g2 + sub %o1, 1, %o1 + sub %o0, 1, %o0 + sub %o2, 1, %o2 + be,pn %xcc, 5f + stb %g2, [%o0] +4: lduh [%o1 - 2], %g2 + sub %o1, 2, %o1 + sub %o0, 2, %o0 + sub %o2, 2, %o2 + sth %g2, [%o0] +5: andcc %o1, 4, %g0 +236: be,a,pn %xcc, 2f + andcc %o2, -128, %g6 + lduw [%o1 - 4], %g5 + sub %o1, 4, %o1 + sub %o0, 4, %o0 + sub %o2, 4, %o2 + stw %g5, [%o0] + andcc %o2, -128, %g6 +2: be,pn %xcc, 235f + andcc %o0, 4, %g0 + be,pn %xcc, 282f + 4 +5: RMOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5) + RMOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5) + RMOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5) + RMOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5) + subcc %g6, 128, %g6 + sub %o1, 128, %o1 + bne,pt %xcc, 5b + sub %o0, 128, %o0 +235: andcc %o2, 0x70, %g6 +41: be,pn %xcc, 280f + andcc %o2, 8, %g0 + +279: rd %pc, %o5 + sll %g6, 1, %g5 + sub %o1, %g6, %o1 + sub %o5, %g5, %o5 + jmpl %o5 + %lo(280f - 279b), %g0 + sub %o0, %g6, %o0 + RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5) + RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5) +280: be,pt %xcc, 281f + andcc %o2, 4, %g0 + ldx [%o1 - 8], %g2 + sub %o0, 8, %o0 + stw %g2, [%o0 + 4] + sub %o1, 8, %o1 + srlx %g2, 32, %g2 + stw %g2, [%o0] +281: be,pt %xcc, 1f + andcc %o2, 2, %g0 + lduw [%o1 - 4], %g2 + sub %o1, 4, %o1 + stw %g2, [%o0 - 4] + sub %o0, 4, %o0 +1: be,pt %xcc, 1f + andcc %o2, 1, %g0 + lduh [%o1 - 2], %g2 + sub %o1, 2, %o1 + sth %g2, [%o0 - 2] + sub %o0, 2, %o0 +1: be,pt %xcc, 211f + nop + ldub [%o1 - 1], %g2 + stb %g2, [%o0 - 1] +211: retl + mov %g4, %o0 + +282: RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5) + RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5) + subcc %g6, 128, %g6 + sub %o1, 128, %o1 + bne,pt %xcc, 282b + sub %o0, 128, %o0 + andcc %o2, 0x70, %g6 + be,pn %xcc, 284f + andcc %o2, 8, %g0 + +283: rd %pc, %o5 + sub %o1, %g6, %o1 + sub %o5, %g6, %o5 + jmpl %o5 + %lo(284f - 283b), %g0 + sub %o0, %g6, %o0 + RMOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3) + RMOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3) +284: be,pt %xcc, 285f + andcc %o2, 4, %g0 + ldx [%o1 - 8], %g2 + 
sub %o0, 8, %o0 + sub %o1, 8, %o1 + stx %g2, [%o0] +285: be,pt %xcc, 1f + andcc %o2, 2, %g0 + lduw [%o1 - 4], %g2 + sub %o0, 4, %o0 + sub %o1, 4, %o1 + stw %g2, [%o0] +1: be,pt %xcc, 1f + andcc %o2, 1, %g0 + lduh [%o1 - 2], %g2 + sub %o0, 2, %o0 + sub %o1, 2, %o1 + sth %g2, [%o0] +1: be,pt %xcc, 1f + nop + ldub [%o1 - 1], %g2 + stb %g2, [%o0 - 1] +1: retl + mov %g4, %o0 + +232: ldub [%o1 - 1], %g5 + sub %o1, 1, %o1 + sub %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %xcc, 232b + stb %g5, [%o0] +234: retl + mov %g4, %o0 +END(memmove) + +#ifdef USE_BPR +weak_alias (memcpy, __align_cpy_1) +weak_alias (memcpy, __align_cpy_2) +weak_alias (memcpy, __align_cpy_4) +weak_alias (memcpy, __align_cpy_8) +weak_alias (memcpy, __align_cpy_16) +#endif +libc_hidden_builtin_def (memcpy) +libc_hidden_builtin_def (memmove) diff --git a/sysdeps/sparc/sparc64/sparcv9v/memset.S b/sysdeps/sparc/sparc64/sparcv9v/memset.S new file mode 100644 index 0000000..ac0a50c --- /dev/null +++ b/sysdeps/sparc/sparc64/sparcv9v/memset.S @@ -0,0 +1,127 @@ +/* Set a block of memory to some byte value. For SUN4V Niagara. + Copyright (C) 2006 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by David S. Miller (davem@davemloft.net) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. 
*/ + +#include <sysdep.h> + +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 +#define ASI_P 0x80 +#define ASI_PNF 0x82 + +#ifndef XCC +#define USE_BPR +#define XCC xcc +#endif + + .register %g2,#scratch + + .text + .align 32 + +ENTRY(memset) + /* %o0=buf, %o1=pat, %o2=len */ + and %o1, 0xff, %o3 + mov %o2, %o1 + sllx %o3, 8, %g1 + or %g1, %o3, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %XCC, 1f + or %g1, %o2, %o2 +END(memset) + +ENTRY(__bzero) + clr %o2 +1: brz,pn %o1, 90f + mov %o0, %o3 + + wr %g0, ASI_P, %asi + + cmp %o1, 15 + bl,pn %icc, 70f + andcc %o0, 0x7, %g1 + be,pt %XCC, 2f + mov 8, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: stba %o2, [%o0 + 0x00] %asi + subcc %g1, 1, %g1 + bne,pt %XCC, 1b + add %o0, 1, %o0 +2: cmp %o1, 128 + bl,pn %icc, 60f + andcc %o0, (64 - 1), %g1 + be,pt %XCC, 40f + mov 64, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: stxa %o2, [%o0 + 0x00] %asi + subcc %g1, 8, %g1 + bne,pt %XCC, 1b + add %o0, 8, %o0 + +40: + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + andn %o1, (64 - 1), %g1 + sub %o1, %g1, %o1 +50: + stxa %o2, [%o0 + 0x00] %asi + stxa %o2, [%o0 + 0x08] %asi + stxa %o2, [%o0 + 0x10] %asi + stxa %o2, [%o0 + 0x18] %asi + stxa %o2, [%o0 + 0x20] %asi + stxa %o2, [%o0 + 0x28] %asi + stxa %o2, [%o0 + 0x30] %asi + stxa %o2, [%o0 + 0x38] %asi + subcc %g1, 64, %g1 + bne,pt %XCC, 50b + add %o0, 64, %o0 + + wr %g0, ASI_P, %asi + brz,pn %o1, 80f +60: + andncc %o1, 0x7, %g1 + be,pn %XCC, 2f + sub %o1, %g1, %o1 +1: stxa %o2, [%o0 + 0x00] %asi + subcc %g1, 8, %g1 + bne,pt %XCC, 1b + add %o0, 8, %o0 +2: brz,pt %o1, 80f + nop + +70: +1: stba %o2, [%o0 + 0x00] %asi + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 + + /* fallthrough */ + +80: + wr %g0, ASI_PNF, %asi + +90: + retl + mov %o3, %o0 +END(__bzero) + +libc_hidden_builtin_def (memset) +weak_alias (__bzero, bzero) |
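
Editor's note (not part of the patch): the comment in the new sparcv9v/memcpy.S above ("Neither 8-byte nor 16-byte aligned, shift and mask... basically integer faligndata") describes the classic misaligned-copy trick that the SWIVEL_ONE_DWORD macro implements with sllx/srlx/or. As a reading aid, here is a minimal C sketch of the same idea under stated assumptions: the helper name is invented, it assumes a big-endian machine (as SPARC is), that the source is *not* already 8-byte aligned (so the shift is nonzero, matching the path the assembly takes), and, like the assembly, it reads the whole aligned doublewords that contain the source bytes.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sketch only.  Copy `ndwords` 8-byte words to an
       8-byte-aligned destination from a misaligned source by carrying
       the leftover bits of each aligned load into the next store:
       store = (prev << shift) | (next >> (64 - shift)).  */
    static void
    copy_misaligned_words (uint64_t *dst, const unsigned char *src,
                           size_t ndwords)
    {
      unsigned int shift = ((uintptr_t) src & 7) * 8;   /* misalignment in bits, assumed nonzero */
      const uint64_t *s = (const uint64_t *) ((uintptr_t) src & ~(uintptr_t) 7);
      uint64_t carry = *s++ << shift;                   /* drop the bytes before src */

      while (ndwords-- > 0)
        {
          uint64_t next = *s++;
          *dst++ = carry | (next >> (64 - shift));      /* big-endian: high bytes come first */
          carry = next << shift;                        /* low bytes feed the next store */
        }
    }

The assembly version additionally interleaves prefetches and uses block-initializing stores, which is why it always writes complete 64-byte cache lines per loop iteration.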
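
A second reading aid, also not part of the patch: the entry of the new sparcv9v/memset.S builds its 64-bit store pattern by replicating the fill byte with three shift/or doubling steps (8, 16, then 32 bits). A minimal C equivalent, with an invented helper name, is:

    #include <stdint.h>

    /* Replicate one byte into all eight bytes of a 64-bit word, the same
       doubling sequence memset performs with sllx/or before its block
       stores.  */
    static inline uint64_t
    splat_byte (unsigned char c)
    {
      uint64_t pat = c;
      pat |= pat << 8;    /* pattern now fills 2 bytes */
      pat |= pat << 16;   /* 4 bytes */
      pat |= pat << 32;   /* 8 bytes */
      return pat;
    }

__bzero reuses the same code path by jumping in with a zero pattern, which is why memset falls through into the shared store loops after constructing the pattern.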