author    | Maciej W. Rozycki <macro@codesourcery.com> | 2013-02-05 14:41:32 +0000
committer | Maciej W. Rozycki <macro@codesourcery.com> | 2013-02-05 14:55:20 +0000
commit    | b82ba2f011fc4628ceece07412846d0b4d50cac2 (patch)
tree      | 58b00169b96ca952724f0ca2e1d8c1f3969e3d17
parent    | 9a0d1941d30221ed8875ebef4c080bd726740802 (diff)
download  | glibc-b82ba2f011fc4628ceece07412846d0b4d50cac2.zip, glibc-b82ba2f011fc4628ceece07412846d0b4d50cac2.tar.gz, glibc-b82ba2f011fc4628ceece07412846d0b4d50cac2.tar.bz2
MIPS: Respect the legacy syscall restart convention.
That convention requires the instruction immediately preceding SYSCALL
to initialize $v0 with the syscall number.  Then if a restart triggers,
$v0 will have been clobbered by the interrupted syscall and needs to be
reinitialized.  The kernel will decrement the PC by 4 before switching
back to user mode, so that $v0 has been reloaded before SYSCALL is
executed again.  This implies that the location $v0 is loaded from must
be preserved across a syscall, e.g. an immediate, a static register, a
stack slot, etc.

The restriction was lifted with the Linux 2.6.36 kernel release, and no
special requirements are placed around the SYSCALL instruction anymore;
however, we still support older kernel binaries.
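To make the constraint concrete, here is a minimal, hypothetical o32
inline-assembly wrapper (it is not code from this patch; the name
legacy_syscall1 and the abbreviated clobber list are illustrative only)
that follows the same scheme the patch adopts for run-time syscall
numbers: the number is parked in the callee-saved register $s0, which
survives the syscall, and the instruction immediately before SYSCALL
copies it into $v0, so an old kernel can restart the call simply by
backing the PC up by 4.

/* Illustrative sketch only.  On microMIPS the copy would have to use
   the 32-bit "move32" encoding, as the patch's MOVE32 macro does.  */
static inline long
legacy_syscall1 (long number, long arg1)
{
  register long s0 asm ("$16") = number;   /* preserved across the syscall */
  register long v0 asm ("$2");             /* syscall number in, result out */
  register long a0 asm ("$4") = arg1;      /* first argument */
  register long a3 asm ("$7");             /* error flag on return */

  __asm__ volatile (".set\tnoreorder\n\t"
                    "move\t%0, %2\n\t"     /* reload $v0 right before SYSCALL */
                    "syscall\n\t"
                    ".set\treorder"
                    : "=r" (v0), "=r" (a3)
                    : "r" (s0), "r" (a0)
                    : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
                      "$24", "$25", "hi", "lo", "memory");

  return a3 != 0 ? -v0 : v0;               /* negative errno on failure */
}

Because $s0 is untouched by the kernel, re-executing the MOVE after a
PC-4 restart reconstructs $v0 exactly; by contrast, loading $v0 from a
temporary register that the syscall may clobber would break the old
convention.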
-rw-r--r-- | NEWS                                                    |   2
-rw-r--r-- | ports/ChangeLog.mips                                    |  34
-rw-r--r-- | ports/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h      | 121
-rw-r--r-- | ports/sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h  | 100
-rw-r--r-- | ports/sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h  | 100
5 files changed, 269 insertions, 88 deletions
@@ -10,7 +10,7 @@ Version 2.18
 * The following bugs are resolved with this release:
   13951, 14142, 14200, 14317, 14327, 14496, 14964, 14981, 14982, 14985,
-  14994, 14996, 15003, 15020, 15023, 15036, 15062.
+  14994, 14996, 15003, 15020, 15023, 15036, 15054, 15062.
 Version 2.17
diff --git a/ports/ChangeLog.mips b/ports/ChangeLog.mips
index 65d4206..c5a2cb9 100644
--- a/ports/ChangeLog.mips
+++ b/ports/ChangeLog.mips
@@ -1,3 +1,37 @@
+2013-02-05  Maciej W. Rozycki  <macro@codesourcery.com>
+
+	[BZ #15054]
+	* sysdeps/unix/sysv/linux/mips/mips32/sysdep.h (MOVE32):
+	New macro.
+	(INTERNAL_SYSCALL_NCS): Use it.  Rewrite to respect the syscall
+	restart convention.
+	(INTERNAL_SYSCALL): Rewrite to respect the syscall restart
+	convention.
+	(internal_syscall0, internal_syscall1): Likewise.
+	(internal_syscall2, internal_syscall3): Likewise.
+	(internal_syscall4, internal_syscall5): Likewise.
+	(internal_syscall6, internal_syscall7): Likewise.
+	* sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h (MOVE32):
+	New macro.
+	(INTERNAL_SYSCALL_NCS): Use it.  Rewrite to respect the syscall
+	restart convention.
+	(INTERNAL_SYSCALL): Rewrite to respect the syscall restart
+	convention.
+	(internal_syscall0, internal_syscall1): Likewise.
+	(internal_syscall2, internal_syscall3): Likewise.
+	(internal_syscall4, internal_syscall5): Likewise.
+	(internal_syscall6): Likewise.
+	* sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h (MOVE32):
+	New macro.
+	(INTERNAL_SYSCALL_NCS): Use it.  Rewrite to respect the syscall
+	restart convention.
+	(INTERNAL_SYSCALL): Rewrite to respect the syscall restart
+	convention.
+	(internal_syscall0, internal_syscall1): Likewise.
+	(internal_syscall2, internal_syscall3): Likewise.
+	(internal_syscall4, internal_syscall5): Likewise.
+	(internal_syscall6): Likewise.
+
 2013-02-04  Joseph Myers  <joseph@codesourcery.com>
 
 	[BZ #13550]
diff --git a/ports/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h b/ports/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
index e79fda9..51ae813 100644
--- a/ports/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
+++ b/ports/sysdeps/unix/sysv/linux/mips/mips32/sysdep.h
@@ -67,25 +67,57 @@
 #undef INTERNAL_SYSCALL_ERRNO
 #define INTERNAL_SYSCALL_ERRNO(val, err)	((void) (err), val)
 
+/* Note that the original Linux syscall restart convention required the
+   instruction immediately preceding SYSCALL to initialize $v0 with the
+   syscall number.  Then if a restart triggered, $v0 would have been
+   clobbered by the syscall interrupted, and needed to be reinititalized.
+   The kernel would decrement the PC by 4 before switching back to the
+   user mode so that $v0 had been reloaded before SYSCALL was executed
+   again.  This implied the place $v0 was loaded from must have been
+   preserved across a syscall, e.g. an immediate, static register, stack
+   slot, etc.
+
+   The convention was relaxed in Linux with a change applied to the kernel
+   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
+   first appeared in the 2.6.36 release.  Since then the kernel has had
+   code that reloads $v0 upon syscall restart and resumes right at the
+   SYSCALL instruction, so no special arrangement is needed anymore.
+
+   For backwards compatibility with existing kernel binaries we support
+   the old convention by choosing the instruction preceding SYSCALL
+   carefully.  This also means we have to force a 32-bit encoding of the
+   microMIPS MOVE instruction if one is used.
*/ + +#ifdef __mips_micromips +# define MOVE32 "move32" +#else +# define MOVE32 "move" +#endif + #undef INTERNAL_SYSCALL -#define INTERNAL_SYSCALL(name, err, nr, args...) \ - internal_syscall##nr (, "li\t$2, %2\t\t\t# " #name "\n\t", \ - "i" (SYS_ify (name)), err, args) +#define INTERNAL_SYSCALL(name, err, nr, args...) \ + internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \ + "IK" (SYS_ify (name)), \ + 0, err, args) #undef INTERNAL_SYSCALL_NCS -#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ - internal_syscall##nr (= number, , "r" (__v0), err, args) +#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ + internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \ + "r" (__s0), \ + number, err, args) -#define internal_syscall0(ncs_init, cs_init, input, err, dummy...) \ +#define internal_syscall0(v0_init, input, number, err, dummy...) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -97,17 +129,19 @@ _sys_result; \ }) -#define internal_syscall1(ncs_init, cs_init, input, err, arg1) \ +#define internal_syscall1(v0_init, input, number, err, arg1) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -119,20 +153,22 @@ _sys_result; \ }) -#define internal_syscall2(ncs_init, cs_init, input, err, arg1, arg2) \ +#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ : input, "r" (__a0), "r" (__a1) \ : __SYSCALL_CLOBBERS); \ @@ -142,21 +178,24 @@ _sys_result; \ }) -#define internal_syscall3(ncs_init, cs_init, input, err, arg1, arg2, arg3)\ +#define internal_syscall3(v0_init, input, number, err, \ + arg1, arg2, arg3) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ : input, "r" (__a0), "r" (__a1), "r" (__a2) \ : __SYSCALL_CLOBBERS); \ @@ -166,21 +205,24 @@ _sys_result; \ }) -#define internal_syscall4(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4)\ +#define internal_syscall4(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 
asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ register long __a3 asm("$7") = (long) (arg4); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ : input, "r" (__a0), "r" (__a1), "r" (__a2) \ : __SYSCALL_CLOBBERS); \ @@ -197,13 +239,16 @@ #define FORCE_FRAME_POINTER \ void *volatile __fp_force __attribute__ ((unused)) = alloca (4) -#define internal_syscall5(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5)\ +#define internal_syscall5(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5) \ ({ \ long _sys_result; \ \ FORCE_FRAME_POINTER; \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ @@ -212,10 +257,10 @@ ".set\tnoreorder\n\t" \ "subu\t$29, 32\n\t" \ "sw\t%6, 16($29)\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ "addiu\t$29, 32\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ : input, "r" (__a0), "r" (__a1), "r" (__a2), \ "r" ((long) (arg5)) \ @@ -226,13 +271,16 @@ _sys_result; \ }) -#define internal_syscall6(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6)\ +#define internal_syscall6(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ ({ \ long _sys_result; \ \ FORCE_FRAME_POINTER; \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ @@ -242,10 +290,10 @@ "subu\t$29, 32\n\t" \ "sw\t%6, 16($29)\n\t" \ "sw\t%7, 20($29)\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ "addiu\t$29, 32\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ : input, "r" (__a0), "r" (__a1), "r" (__a2), \ "r" ((long) (arg5)), "r" ((long) (arg6)) \ @@ -256,13 +304,16 @@ _sys_result; \ }) -#define internal_syscall7(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\ +#define internal_syscall7(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ ({ \ long _sys_result; \ \ FORCE_FRAME_POINTER; \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ @@ -273,10 +324,10 @@ "sw\t%6, 16($29)\n\t" \ "sw\t%7, 20($29)\n\t" \ "sw\t%8, 24($29)\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ "addiu\t$29, 32\n\t" \ - ".set\treorder" \ + ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ : input, "r" (__a0), "r" (__a1), "r" (__a2), \ "r" ((long) (arg5)), "r" ((long) (arg6)), "r" ((long) (arg7)) \ diff --git a/ports/sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h b/ports/sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h index 3ebbf89..41a6f22 100644 --- a/ports/sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h +++ b/ports/sysdeps/unix/sysv/linux/mips/mips64/n32/sysdep.h @@ 
-71,25 +71,57 @@ #undef INTERNAL_SYSCALL_ERRNO #define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val) +/* Note that the original Linux syscall restart convention required the + instruction immediately preceding SYSCALL to initialize $v0 with the + syscall number. Then if a restart triggered, $v0 would have been + clobbered by the syscall interrupted, and needed to be reinititalized. + The kernel would decrement the PC by 4 before switching back to the + user mode so that $v0 had been reloaded before SYSCALL was executed + again. This implied the place $v0 was loaded from must have been + preserved across a syscall, e.g. an immediate, static register, stack + slot, etc. + + The convention was relaxed in Linux with a change applied to the kernel + GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that + first appeared in the 2.6.36 release. Since then the kernel has had + code that reloads $v0 upon syscall restart and resumes right at the + SYSCALL instruction, so no special arrangement is needed anymore. + + For backwards compatibility with existing kernel binaries we support + the old convention by choosing the instruction preceding SYSCALL + carefully. This also means we have to force a 32-bit encoding of the + microMIPS MOVE instruction if one is used. */ + +#ifdef __mips_micromips +# define MOVE32 "move32" +#else +# define MOVE32 "move" +#endif + #undef INTERNAL_SYSCALL -#define INTERNAL_SYSCALL(name, err, nr, args...) \ - internal_syscall##nr (, "li\t$2, %2\t\t\t# " #name "\n\t", \ - "i" (SYS_ify (name)), err, args) +#define INTERNAL_SYSCALL(name, err, nr, args...) \ + internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \ + "IK" (SYS_ify (name)), \ + 0, err, args) #undef INTERNAL_SYSCALL_NCS -#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ - internal_syscall##nr (= number, , "r" (__v0), err, args) +#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ + internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \ + "r" (__s0), \ + number, err, args) -#define internal_syscall0(ncs_init, cs_init, input, err, dummy...) \ +#define internal_syscall0(v0_init, input, number, err, dummy...) 
\ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -101,17 +133,19 @@ _sys_result; \ }) -#define internal_syscall1(ncs_init, cs_init, input, err, arg1) \ +#define internal_syscall1(v0_init, input, number, err, arg1) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY (arg1); \ register long long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -123,18 +157,20 @@ _sys_result; \ }) -#define internal_syscall2(ncs_init, cs_init, input, err, arg1, arg2) \ +#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY (arg1); \ register long long __a1 asm("$5") = ARGIFY (arg2); \ register long long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -146,19 +182,22 @@ _sys_result; \ }) -#define internal_syscall3(ncs_init, cs_init, input, err, arg1, arg2, arg3) \ +#define internal_syscall3(v0_init, input, number, err, \ + arg1, arg2, arg3) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY (arg1); \ register long long __a1 asm("$5") = ARGIFY (arg2); \ register long long __a2 asm("$6") = ARGIFY (arg3); \ register long long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -170,19 +209,22 @@ _sys_result; \ }) -#define internal_syscall4(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4) \ +#define internal_syscall4(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY (arg1); \ register long long __a1 asm("$5") = ARGIFY (arg2); \ register long long __a2 asm("$6") = ARGIFY (arg3); \ register long long __a3 asm("$7") = ARGIFY (arg4); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ @@ -194,12 +236,15 @@ _sys_result; \ }) -#define internal_syscall5(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5) \ +#define internal_syscall5(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY 
(arg1); \ register long long __a1 asm("$5") = ARGIFY (arg2); \ register long long __a2 asm("$6") = ARGIFY (arg3); \ @@ -207,7 +252,7 @@ register long long __a4 asm("$8") = ARGIFY (arg5); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ @@ -219,12 +264,15 @@ _sys_result; \ }) -#define internal_syscall6(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6) \ +#define internal_syscall6(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ ({ \ long _sys_result; \ \ { \ - register long long __v0 asm("$2") ncs_init; \ + register long long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long long __v0 asm("$2"); \ register long long __a0 asm("$4") = ARGIFY (arg1); \ register long long __a1 asm("$5") = ARGIFY (arg2); \ register long long __a2 asm("$6") = ARGIFY (arg3); \ @@ -233,7 +281,7 @@ register long long __a5 asm("$9") = ARGIFY (arg6); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ diff --git a/ports/sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h b/ports/sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h index 9d94995..fecd3e4 100644 --- a/ports/sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h +++ b/ports/sysdeps/unix/sysv/linux/mips/mips64/n64/sysdep.h @@ -67,25 +67,57 @@ #undef INTERNAL_SYSCALL_ERRNO #define INTERNAL_SYSCALL_ERRNO(val, err) ((void) (err), val) +/* Note that the original Linux syscall restart convention required the + instruction immediately preceding SYSCALL to initialize $v0 with the + syscall number. Then if a restart triggered, $v0 would have been + clobbered by the syscall interrupted, and needed to be reinititalized. + The kernel would decrement the PC by 4 before switching back to the + user mode so that $v0 had been reloaded before SYSCALL was executed + again. This implied the place $v0 was loaded from must have been + preserved across a syscall, e.g. an immediate, static register, stack + slot, etc. + + The convention was relaxed in Linux with a change applied to the kernel + GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that + first appeared in the 2.6.36 release. Since then the kernel has had + code that reloads $v0 upon syscall restart and resumes right at the + SYSCALL instruction, so no special arrangement is needed anymore. + + For backwards compatibility with existing kernel binaries we support + the old convention by choosing the instruction preceding SYSCALL + carefully. This also means we have to force a 32-bit encoding of the + microMIPS MOVE instruction if one is used. */ + +#ifdef __mips_micromips +# define MOVE32 "move32" +#else +# define MOVE32 "move" +#endif + #undef INTERNAL_SYSCALL -#define INTERNAL_SYSCALL(name, err, nr, args...) \ - internal_syscall##nr (, "li\t$2, %2\t\t\t# " #name "\n\t", \ - "i" (SYS_ify (name)), err, args) +#define INTERNAL_SYSCALL(name, err, nr, args...) \ + internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t", \ + "IK" (SYS_ify (name)), \ + 0, err, args) #undef INTERNAL_SYSCALL_NCS -#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ - internal_syscall##nr (= number, , "r" (__v0), err, args) +#define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \ + internal_syscall##nr (MOVE32 "\t%0, %2\n\t", \ + "r" (__s0), \ + number, err, args) -#define internal_syscall0(ncs_init, cs_init, input, err, dummy...) \ +#define internal_syscall0(v0_init, input, number, err, dummy...) 
\ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -97,17 +129,19 @@ _sys_result; \ }) -#define internal_syscall1(ncs_init, cs_init, input, err, arg1) \ +#define internal_syscall1(v0_init, input, number, err, arg1) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set reorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -119,18 +153,20 @@ _sys_result; \ }) -#define internal_syscall2(ncs_init, cs_init, input, err, arg1, arg2) \ +#define internal_syscall2(v0_init, input, number, err, arg1, arg2) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -142,19 +178,22 @@ _sys_result; \ }) -#define internal_syscall3(ncs_init, cs_init, input, err, arg1, arg2, arg3) \ +#define internal_syscall3(v0_init, input, number, err, \ + arg1, arg2, arg3) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ register long __a3 asm("$7"); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "=r" (__a3) \ @@ -166,19 +205,22 @@ _sys_result; \ }) -#define internal_syscall4(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4) \ +#define internal_syscall4(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ register long __a3 asm("$7") = (long) (arg4); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ @@ -190,12 +232,15 @@ _sys_result; \ }) -#define internal_syscall5(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5) \ +#define internal_syscall5(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ @@ -203,7 +248,7 @@ register long __a4 asm("$8") = (long) 
(arg5); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ @@ -215,12 +260,15 @@ _sys_result; \ }) -#define internal_syscall6(ncs_init, cs_init, input, err, arg1, arg2, arg3, arg4, arg5, arg6) \ +#define internal_syscall6(v0_init, input, number, err, \ + arg1, arg2, arg3, arg4, arg5, arg6) \ ({ \ long _sys_result; \ \ { \ - register long __v0 asm("$2") ncs_init; \ + register long __s0 asm("$16") __attribute__ ((unused)) \ + = (number); \ + register long __v0 asm("$2"); \ register long __a0 asm("$4") = (long) (arg1); \ register long __a1 asm("$5") = (long) (arg2); \ register long __a2 asm("$6") = (long) (arg3); \ @@ -229,7 +277,7 @@ register long __a5 asm("$9") = (long) (arg6); \ __asm__ volatile ( \ ".set\tnoreorder\n\t" \ - cs_init \ + v0_init \ "syscall\n\t" \ ".set\treorder" \ : "=r" (__v0), "+r" (__a3) \ |
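For orientation, the rewritten macros are consumed through the usual
glibc-internal INTERNAL_SYSCALL interface; the restart handling above is
entirely hidden inside the internal_syscall##nr expansions.  The fragment
below is a hypothetical usage sketch, not part of this patch: it assumes
the glibc-internal <sysdep.h> machinery (INTERNAL_SYSCALL_DECL,
INTERNAL_SYSCALL_ERROR_P, INTERNAL_SYSCALL_ERRNO, __set_errno) is in
scope, and the function name example_getpid is invented for illustration.

#include <sysdep.h>
#include <errno.h>
#include <unistd.h>

static pid_t
example_getpid (void)
{
  INTERNAL_SYSCALL_DECL (err);
  /* Constant syscall number: INTERNAL_SYSCALL expands to an LI into $v0
     immediately before SYSCALL, satisfying the legacy restart convention.
     A number only known at run time would go through INTERNAL_SYSCALL_NCS,
     which parks it in $s0 and emits MOVE32/"move" before SYSCALL instead.  */
  long res = INTERNAL_SYSCALL (getpid, err, 0);
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (res, err));
      return -1;
    }
  return res;
}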