author	Rich Felker <dalias@aerifal.cx>	2019-05-10 20:56:19 -0400
committer	Rich Felker <dalias@aerifal.cx>	2019-05-10 20:56:19 -0400
commit	c8798ef974d21c338a7d8d874a402978ffc6168e
tree	29598a3b6acda4e3f7b4ea4f94fa20233ade1738 /arch
parent	511d70738bce11a67219d0132ce725c323d00e4e
fix regression in i386 inline syscall asm producing invalid code
commit 22e5bbd0deadcbd767864bd714e890b70e1fe1df inlined the i386 syscall mechanism, but wrongly assumed memory operands to the 5- and 6-argument syscall asm would be esp-based. however, nothing in the constraints prevented them from being ebx- or ebp-based, and in those cases ebx and ebp could be clobbered before use of the memory operand was complete. in the 6-argument case, this prevented restoration of the original register values before the end of the asm block, breaking the asm contract, since ebx and ebp are not marked as clobbered. (they can't be, because many compilers reject these registers in constraints or clobbers when PIC or the frame pointer is enabled.)

doing this right is complicated by the fact that, after a single push, no operands that might be memory operands are usable: if they are esp-based, the value of esp has changed, rendering them invalid.

introduce some new dances to load the registers. for the 5-argument case, push the operand that may be a memory operand first; after that, it does not matter if the operand becomes invalid, since the newly pushed copy is used instead. for the 6-argument case, both operands must be placed in memory to begin with (the old non-inline code prior to commit 22e5bbd0deadcbd767864bd714e890b70e1fe1df accepted them that way), so that the asm sees only one potentially memory-based operand. that operand can then be saved with a single push, after which the values can be read off into the registers they are needed in.

there is some size overhead, but still far less execution overhead than the old out-of-line code. doing better depends on a modern compiler that allows ebx and ebp in asm constraints without restriction, and the failure modes on compilers where this does not work are inconsistent and dangerous (on at least some gcc versions 4.x and earlier, wrong codegen!), so this is a delicate matter. it can be addressed later if needed.
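To make the 5-argument dance concrete, here is a commented sketch of the new sequence from the patch below, spelling out the stack layout it relies on. The helper name __syscall5_sketch is made up for this note, and SYSCALL_INSNS is pinned to the plain int $0x80 trap so the snippet stands alone; in musl it is a macro that may expand to a different syscall entry.

/* sketch only: __syscall5_sketch and this SYSCALL_INSNS definition are
 * illustrative assumptions, not musl's exact definitions */
#define SYSCALL_INSNS "int $0x80"

static inline long __syscall5_sketch(long n, long a1, long a2, long a3, long a4, long a5)
{
	unsigned long __ret;
	__asm__ __volatile__ (
		/* push a1 while any base register its "g" operand might
		 * use (ebx, ebp, or esp) still holds its original value */
		"pushl %2 ; "
		/* save the caller's ebx; it cannot be listed as clobbered */
		"push %%ebx ; "
		/* stack: [esp] = saved ebx, [esp+4] = copy of a1; load a1
		 * from the copy, since the original operand may now be
		 * invalid after esp has moved */
		"mov 4(%%esp),%%ebx ; "
		SYSCALL_INSNS
		/* restore ebx, then drop the pushed copy of a1 */
		" ; pop %%ebx ; add $4,%%esp"
		: "=a"(__ret)
		: "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5)
		: "memory");
	return __ret;
}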
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/syscall_arch.h	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/i386/syscall_arch.h b/arch/i386/syscall_arch.h
index 8fe3542..29e141f 100644
--- a/arch/i386/syscall_arch.h
+++ b/arch/i386/syscall_arch.h
@@ -49,17 +49,17 @@ static inline long __syscall4(long n, long a1, long a2, long a3, long a4)
static inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
{
-	unsigned long __ret, __tmp;
-	__asm__ __volatile__ ("mov %%ebx,%1 ; mov %3,%%ebx ; " SYSCALL_INSNS " ; mov %1,%%ebx"
-		: "=a"(__ret), "=m"(__tmp) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
+	unsigned long __ret;
+	__asm__ __volatile__ ("pushl %2 ; push %%ebx ; mov 4(%%esp),%%ebx ; " SYSCALL_INSNS " ; pop %%ebx ; add $4,%%esp"
+		: "=a"(__ret) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
	return __ret;
}

static inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6)
{
-	unsigned long __ret, __tmp1, __tmp2;
-	__asm__ __volatile__ ("mov %%ebx,%1 ; mov %%ebp,%2 ; mov %4,%%ebx ; mov %9,%%ebp ; " SYSCALL_INSNS " ; mov %2,%%ebp ; mov %1,%%ebx"
-		: "=a"(__ret), "=m"(__tmp1), "=m"(__tmp2) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5), "g"(a6) : "memory");
+	unsigned long __ret, a1a6[2] = { a1, a6 };
+	__asm__ __volatile__ ("pushl %1 ; push %%ebx ; push %%ebp ; mov 8(%%esp),%%ebx ; mov 4(%%ebx),%%ebp ; mov (%%ebx),%%ebx ; " SYSCALL_INSNS " ; pop %%ebp ; pop %%ebx ; add $4,%%esp"
+		: "=a"(__ret) : "g"(&a1a6), "a"(n), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
	return __ret;
}
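
Likewise, a commented sketch of the new 6-argument sequence just above, showing why a1 and a6 are first spilled to a two-element array so the asm sees only one potentially memory-based operand. As before, __syscall6_sketch and the int $0x80 form of SYSCALL_INSNS are assumptions made for illustration.

/* sketch only: __syscall6_sketch and this SYSCALL_INSNS definition are
 * illustrative assumptions, not musl's exact definitions */
#define SYSCALL_INSNS "int $0x80"

static inline long __syscall6_sketch(long n, long a1, long a2, long a3, long a4, long a5, long a6)
{
	/* spill a1 and a6 to memory up front; &a1a6 is then the only
	 * operand that might be memory-based */
	unsigned long __ret, a1a6[2] = { a1, a6 };
	__asm__ __volatile__ (
		/* push the array address while its "g" operand is still valid */
		"pushl %1 ; "
		/* save the caller's ebx and ebp; stack is now
		 * [esp] = saved ebp, [esp+4] = saved ebx, [esp+8] = &a1a6 */
		"push %%ebx ; push %%ebp ; "
		/* read the pointer back, then load a6 and a1 through it;
		 * the pointer is overwritten last, by the load of a1 */
		"mov 8(%%esp),%%ebx ; "
		"mov 4(%%ebx),%%ebp ; "
		"mov (%%ebx),%%ebx ; "
		SYSCALL_INSNS
		/* restore ebp and ebx, then drop the pushed pointer */
		" ; pop %%ebp ; pop %%ebx ; add $4,%%esp"
		: "=a"(__ret)
		: "g"(&a1a6), "a"(n), "c"(a2), "d"(a3), "S"(a4), "D"(a5)
		: "memory");
	return __ret;
}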