path: root/elf/tst-auditmod6c.c
author     H.J. Lu <hongjiu.lu@intel.com>        2009-08-08 10:54:42 -0700
committer  Ulrich Drepper <drepper@redhat.com>   2009-08-08 10:54:42 -0700
commit     4e1e2f42472744569f1540dd8410d23180e24bf9 (patch)
tree       420047379cb0d341d37510158d4ca1a88ec57606 /elf/tst-auditmod6c.c
parent     fc1870e6a484ad3211648c9ae51bc076913518aa (diff)
Support mixed SSE/AVX audit and check AVX only once.
This patch fixes mixed SSE/AVX auditing and checks for AVX only once in _dl_runtime_profile. When pltenter modifies an AVX or SSE register value, it has to make sure the SSE part of the value is the same in both the lr_xmm and lr_vector fields, so that pltexit gets the correct value whether it reads lr_xmm or lr_vector. An AVX-enabled pltenter should therefore update both lr_xmm and lr_vector to support stacked AVX/SSE pltenter functions.
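As an illustration only (not part of this patch): when an AVX-aware pltenter rewrites a vector argument register, it has to store the new value through both views of the register file. A minimal sketch, assuming the La_x86_64_regs layout from <bits/link.h>; set_xmm_arg is a hypothetical helper name:

    #include <immintrin.h>
    #include <link.h>    /* La_x86_64_regs, La_x86_64_xmm */

    /* Replace the i-th vector argument register with VALUE, keeping the
       SSE view (lr_xmm) and the AVX view (lr_vector) in sync so that a
       later pltexit reads the same bits from either field.  */
    static void
    set_xmm_arg (La_x86_64_regs *regs, int i, __m128i value)
    {
      regs->lr_xmm[i] = (La_x86_64_xmm) value;
      regs->lr_vector[i].xmm[0] = regs->lr_xmm[i];
    }

The new test module below exercises this same two-store pattern: pltenter replaces the caller's i + 1 / i + 2 argument values with i + 0x100 / i + 0x101, and pltexit checks that it sees the modified values in both fields.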
Diffstat (limited to 'elf/tst-auditmod6c.c')
-rw-r--r--  elf/tst-auditmod6c.c  225
1 file changed, 225 insertions, 0 deletions
diff --git a/elf/tst-auditmod6c.c b/elf/tst-auditmod6c.c
new file mode 100644
index 0000000..49cbf05
--- /dev/null
+++ b/elf/tst-auditmod6c.c
@@ -0,0 +1,225 @@
+/* Verify that changing AVX registers in audit library won't affect
+ function parameter passing/return. */
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <gnu/lib-names.h>
+
+unsigned int
+la_version (unsigned int v)
+{
+ setlinebuf (stdout);
+
+ printf ("version: %u\n", v);
+
+ char buf[20];
+ sprintf (buf, "%u", v);
+
+ return v;
+}
+
+void
+la_activity (uintptr_t *cookie, unsigned int flag)
+{
+ if (flag == LA_ACT_CONSISTENT)
+ printf ("activity: consistent\n");
+ else if (flag == LA_ACT_ADD)
+ printf ("activity: add\n");
+ else if (flag == LA_ACT_DELETE)
+ printf ("activity: delete\n");
+ else
+ printf ("activity: unknown activity %u\n", flag);
+}
+
+char *
+la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
+{
+ char buf[100];
+ const char *flagstr;
+ if (flag == LA_SER_ORIG)
+ flagstr = "LA_SER_ORIG";
+ else if (flag == LA_SER_LIBPATH)
+ flagstr = "LA_SER_LIBPATH";
+ else if (flag == LA_SER_RUNPATH)
+ flagstr = "LA_SER_RUNPATH";
+ else if (flag == LA_SER_CONFIG)
+ flagstr = "LA_SER_CONFIG";
+ else if (flag == LA_SER_DEFAULT)
+ flagstr = "LA_SER_DEFAULT";
+ else if (flag == LA_SER_SECURE)
+ flagstr = "LA_SER_SECURE";
+ else
+ {
+ sprintf (buf, "unknown flag %d", flag);
+ flagstr = buf;
+ }
+ printf ("objsearch: %s, %s\n", name, flagstr);
+
+ return (char *) name;
+}
+
+unsigned int
+la_objopen (struct link_map *l, Lmid_t lmid, uintptr_t *cookie)
+{
+ printf ("objopen: %ld, %s\n", lmid, l->l_name);
+
+ return 3;
+}
+
+void
+la_preinit (uintptr_t *cookie)
+{
+ printf ("preinit\n");
+}
+
+unsigned int
+la_objclose (uintptr_t *cookie)
+{
+ printf ("objclose\n");
+ return 0;
+}
+
+uintptr_t
+la_symbind64 (Elf64_Sym *sym, unsigned int ndx, uintptr_t *refcook,
+ uintptr_t *defcook, unsigned int *flags, const char *symname)
+{
+ printf ("symbind64: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+ symname, (long int) sym->st_value, ndx, *flags);
+
+ return sym->st_value;
+}
+
+#define pltenter la_x86_64_gnu_pltenter
+#define pltexit la_x86_64_gnu_pltexit
+#define La_regs La_x86_64_regs
+#define La_retval La_x86_64_retval
+#define int_retval lrv_rax
+
+#include <tst-audit.h>
+
+#ifdef __AVX__
+#include <immintrin.h>
+#include <cpuid.h>
+
+static int avx = -1;
+
+static int
+__attribute ((always_inline))
+check_avx (void)
+{
+ if (avx == -1)
+ {
+ unsigned int eax, ebx, ecx, edx;
+
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+ && (ecx & bit_AVX))
+ avx = 1;
+ else
+ avx = 0;
+ }
+ return avx;
+}
+#else
+#include <emmintrin.h>
+#endif
+
+ElfW(Addr)
+pltenter (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+ uintptr_t *defcook, La_regs *regs, unsigned int *flags,
+ const char *symname, long int *framesizep)
+{
+ printf ("pltenter: symname=%s, st_value=%#lx, ndx=%u, flags=%u\n",
+ symname, (long int) sym->st_value, ndx, *flags);
+
+#ifdef __AVX__
+ if (check_avx () && strcmp (symname, "audit_test") == 0)
+ {
+ int i;
+ __m128i xmm;
+ __m256i ymm;
+
+ for (i = 0; i < 8; i += 2)
+ {
+ xmm = _mm_set1_epi32 (i + 1);
+ if (memcmp (&regs->lr_xmm[i], &xmm, sizeof (xmm))
+ || memcmp (&regs->lr_vector[i], &xmm, sizeof (xmm)))
+ abort ();
+ regs->lr_xmm[i] = (La_x86_64_xmm) _mm_set1_epi32 (i + 0x100);
+ regs->lr_vector[i].xmm[0] = regs->lr_xmm[i];
+
+ ymm = _mm256_set1_epi32 (i + 2);
+ if (memcmp (&regs->lr_xmm[i + 1],
+ &regs->lr_vector[i + 1].xmm[0], sizeof (xmm))
+ || memcmp (&regs->lr_vector[i + 1], &ymm, sizeof (ymm)))
+ abort ();
+ regs->lr_vector[i + 1].ymm[0]
+ = (La_x86_64_ymm) _mm256_set1_epi32 (i + 0x101);
+ regs->lr_xmm[i + 1] = regs->lr_vector[i + 1].xmm[0];
+ }
+
+ ymm = _mm256_set1_epi32 (-1);
+ asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+ asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+ asm volatile ("vmovdqa %0, %%ymm2" : : "x" (ymm) : "xmm2" );
+ asm volatile ("vmovdqa %0, %%ymm3" : : "x" (ymm) : "xmm3" );
+ asm volatile ("vmovdqa %0, %%ymm4" : : "x" (ymm) : "xmm4" );
+ asm volatile ("vmovdqa %0, %%ymm5" : : "x" (ymm) : "xmm5" );
+ asm volatile ("vmovdqa %0, %%ymm6" : : "x" (ymm) : "xmm6" );
+ asm volatile ("vmovdqa %0, %%ymm7" : : "x" (ymm) : "xmm7" );
+
+ *framesizep = 1024;
+ }
+#endif
+
+ return sym->st_value;
+}
+
+unsigned int
+pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
+ uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
+ const char *symname)
+{
+ printf ("pltexit: symname=%s, st_value=%#lx, ndx=%u, retval=%tu\n",
+ symname, (long int) sym->st_value, ndx, outregs->int_retval);
+
+#ifdef __AVX__
+ if (check_avx () && strcmp (symname, "audit_test") == 0)
+ {
+ int i;
+
+ __m256i ymm = _mm256_set1_epi32 (0x12349876);
+ if (memcmp (&outregs->lrv_vector0, &ymm, sizeof (ymm)))
+ abort ();
+
+ __m128i xmm;
+
+ for (i = 0; i < 8; i += 2)
+ {
+ xmm = _mm_set1_epi32 (i + 0x100);
+ if (memcmp (&inregs->lr_xmm[i], &xmm, sizeof (xmm))
+ || memcmp (&inregs->lr_vector[i], &xmm, sizeof (xmm)))
+ abort ();
+
+ ymm = _mm256_set1_epi32 (i + 0x101);
+ if (memcmp (&inregs->lr_xmm[i + 1],
+ &inregs->lr_vector[i + 1].xmm[0], sizeof (xmm))
+ || memcmp (&inregs->lr_vector[i + 1], &ymm, sizeof (ymm)))
+ abort ();
+ }
+
+ outregs->lrv_vector0.ymm[0]
+ = (La_x86_64_ymm) _mm256_set1_epi32 (0x98abcdef);
+
+ ymm = _mm256_set1_epi32 (-1);
+ asm volatile ("vmovdqa %0, %%ymm0" : : "x" (ymm) : "xmm0" );
+ asm volatile ("vmovdqa %0, %%ymm1" : : "x" (ymm) : "xmm1" );
+ }
+#endif
+
+ return 0;
+}