From febcd83655138bcb01b2680e170e6773a1ec813c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 6 Sep 2004 02:01:35 +0000 Subject: * sysdeps/alpha/div.S: Save and restore FPCR around fp operations. * sysdeps/alpha/divl.S, sysdeps/alpha/divq.S, sysdeps/alpha/divqu.S, sysdeps/alpha/ldiv.S, sysdeps/alpha/reml.S, sysdeps/alpha/remq.S, sysdeps/alpha/remqu.S: Likewise. * sysdeps/alpha/div_libc.h (FRAME): Increase to 64. 2004-09-05 Richard Henderson * sysdeps/alpha/div.S: Save and restore FPCR around fp operations. * sysdeps/alpha/divl.S, sysdeps/alpha/divq.S, sysdeps/alpha/divqu.S, sysdeps/alpha/ldiv.S, sysdeps/alpha/reml.S, sysdeps/alpha/remq.S, sysdeps/alpha/remqu.S: Likewise. * sysdeps/alpha/div_libc.h (FRAME): Increase to 64. --- sysdeps/alpha/div.S | 5 ++++- sysdeps/alpha/div_libc.h | 2 +- sysdeps/alpha/divl.S | 22 ++++++++++++++++++---- sysdeps/alpha/divq.S | 24 ++++++++++++++++++++---- sysdeps/alpha/divqu.S | 32 +++++++++++++++++++++++++------- sysdeps/alpha/ldiv.S | 7 ++++++- sysdeps/alpha/reml.S | 22 ++++++++++++++++++---- sysdeps/alpha/remq.S | 24 ++++++++++++++++++++---- sysdeps/alpha/remqu.S | 21 ++++++++++++++++++--- 9 files changed, 130 insertions(+), 29 deletions(-) (limited to 'sysdeps') diff --git a/sysdeps/alpha/div.S b/sysdeps/alpha/div.S index e0eb7e9..d1a724d 100644 --- a/sysdeps/alpha/div.S +++ b/sysdeps/alpha/div.S @@ -48,6 +48,8 @@ div: #endif beq $18, $divbyzero + excb + mf_fpcr $f10 _ITOFT2 $17, $f0, 0, $18, $f1, 8 @@ -55,7 +57,8 @@ div: cvtqt $f1, $f1 divt/c $f0, $f1, $f0 cvttq/c $f0, $f0 - + excb + mt_fpcr $f10 _FTOIT $f0, $0, 0 mull $0, $18, $1 diff --git a/sysdeps/alpha/div_libc.h b/sysdeps/alpha/div_libc.h index 2f06282..62b4470 100644 --- a/sysdeps/alpha/div_libc.h +++ b/sysdeps/alpha/div_libc.h @@ -71,7 +71,7 @@ /* In order to make the below work, all top-level divide routines must use the same frame size. */ -#define FRAME 48 +#define FRAME 64 /* Code fragment to generate an integer divide-by-zero fault. When building libc.so, we arrange for there to be one copy of this code diff --git a/sysdeps/alpha/divl.S b/sysdeps/alpha/divl.S index 90cd686..408d66d 100644 --- a/sysdeps/alpha/divl.S +++ b/sysdeps/alpha/divl.S @@ -22,7 +22,12 @@ registers are t10 and t11, the result goes in t12. Only t12 and AT may be clobbered. - The FPU can handle all input values except zero. Whee! */ + The FPU can handle all input values except zero. Whee! + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. 
*/ #ifndef EXTEND #define EXTEND(S,D) sextl S, D @@ -41,25 +46,34 @@ __divl: cfi_def_cfa_offset (FRAME) CALL_MCOUNT stt $f0, 0(sp) - stt $f1, 8(sp) + excb beq Y, DIVBYZERO + + stt $f1, 8(sp) + stt $f2, 16(sp) cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f2, 16) + mf_fpcr $f2 EXTEND (X, RV) EXTEND (Y, AT) - _ITOFT2 RV, $f0, 16, AT, $f1, 24 + _ITOFT2 RV, $f0, 24, AT, $f1, 32 cvtqt $f0, $f0 cvtqt $f1, $f1 divt/c $f0, $f1, $f0 cvttq/c $f0, $f0 - _FTOIT $f0, RV, 16 + excb + mt_fpcr $f2 + _FTOIT $f0, RV, 24 ldt $f0, 0(sp) ldt $f1, 8(sp) + ldt $f2, 16(sp) lda sp, FRAME(sp) cfi_restore ($f0) cfi_restore ($f1) + cfi_restore ($f2) cfi_def_cfa_offset (0) sextl RV, RV ret $31, (RA), 1 diff --git a/sysdeps/alpha/divq.S b/sysdeps/alpha/divq.S index cab6c34..7f245ac 100644 --- a/sysdeps/alpha/divq.S +++ b/sysdeps/alpha/divq.S @@ -33,7 +33,12 @@ When the dividend is outside the range for which we can compute exact results, we use the fp quotent as an estimate from which we begin refining an exact integral value. This reduces the number of iterations in the - shift-and-subtract loop significantly. */ + shift-and-subtract loop significantly. + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. */ .text .align 4 @@ -53,10 +58,15 @@ __divq: ready -- all the time in the world to figure out how we're going to use the results. */ stt $f0, 0(sp) - stt $f1, 8(sp) + excb beq Y, DIVBYZERO + + stt $f1, 8(sp) + stt $f3, 48(sp) cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f3, 48) + mf_fpcr $f3 _ITOFT2 X, $f0, 16, Y, $f1, 24 cvtqt $f0, $f0 @@ -73,12 +83,16 @@ __divq: /* If we get here, we're expecting exact results from the division. Do nothing else besides convert and clean up. */ cvttq/c $f0, $f0 + excb + mt_fpcr $f3 _FTOIT $f0, RV, 16 ldt $f0, 0(sp) + ldt $f3, 48(sp) cfi_restore ($f1) cfi_remember_state cfi_restore ($f0) + cfi_restore ($f3) cfi_def_cfa_offset (0) lda sp, FRAME(sp) ret $31, (RA), 1 @@ -121,9 +135,9 @@ $fix_sign_in_ret2: cfi_rel_offset (t3, 0) mulq Q, Y, QY - unop + excb stq t4, 8(sp) - unop + mt_fpcr $f3 cfi_rel_offset (t4, 8) subq QY, X, R @@ -147,6 +161,7 @@ $fix_sign_out_ret: ldq t3, 0(sp) ldq t4, 8(sp) ldq t5, 40(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_remember_state cfi_restore (t0) @@ -155,6 +170,7 @@ $fix_sign_out_ret: cfi_restore (t3) cfi_restore (t4) cfi_restore (t5) + cfi_restore ($f3) cfi_def_cfa_offset (0) ret $31, (RA), 1 diff --git a/sysdeps/alpha/divqu.S b/sysdeps/alpha/divqu.S index 63b575f..fc00fa1 100644 --- a/sysdeps/alpha/divqu.S +++ b/sysdeps/alpha/divqu.S @@ -33,7 +33,12 @@ When the dividend is outside the range for which we can compute exact results, we use the fp quotent as an estimate from which we begin refining an exact integral value. This reduces the number of iterations in the - shift-and-subtract loop significantly. */ + shift-and-subtract loop significantly. + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. */ .text .align 4 @@ -53,10 +58,15 @@ __divqu: ready -- all the time in the world to figure out how we're going to use the results. 
*/ stt $f0, 0(sp) - stt $f1, 8(sp) + excb beq Y, DIVBYZERO + + stt $f1, 8(sp) + stt $f3, 48(sp) cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f3, 48) + mf_fpcr $f3 _ITOFT2 X, $f0, 16, Y, $f1, 24 cvtqt $f0, $f0 @@ -65,10 +75,7 @@ __divqu: divt/c $f0, $f1, $f0 /* Check to see if Y was mis-converted as signed value. */ - .align 4 ldt $f1, 8(sp) - unop - nop blt Y, $y_is_neg /* Check to see if X fit in the double as an exact value. */ @@ -78,11 +85,16 @@ __divqu: /* If we get here, we're expecting exact results from the division. Do nothing else besides convert and clean up. */ cvttq/c $f0, $f0 + excb + mt_fpcr $f3 _FTOIT $f0, RV, 16 + ldt $f0, 0(sp) + ldt $f3, 48(sp) cfi_remember_state cfi_restore ($f0) cfi_restore ($f1) + cfi_restore ($f3) cfi_def_cfa_offset (0) lda sp, FRAME(sp) ret $31, (RA), 1 @@ -140,9 +152,9 @@ $x_big: .align 4 stq t4, 8(sp) - unop + excb ldt $f0, 0(sp) - unop + mt_fpcr $f3 cfi_rel_offset (t4, 8) cfi_restore ($f0) @@ -164,6 +176,7 @@ $q_low_ret: ldq t2, 32(sp) ldq t3, 40(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_remember_state cfi_restore (t0) @@ -171,6 +184,7 @@ $q_low_ret: cfi_restore (t2) cfi_restore (t3) cfi_restore (t4) + cfi_restore ($f3) cfi_def_cfa_offset (0) ret $31, (RA), 1 @@ -227,9 +241,13 @@ $y_is_neg: from the divide will be completely wrong. Fortunately, the quotient must be either 0 or 1, so just compute it directly. */ cmpult Y, X, RV + excb + mt_fpcr $f3 ldt $f0, 0(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_restore ($f0) + cfi_restore ($f3) cfi_def_cfa_offset (0) ret $31, (RA), 1 diff --git a/sysdeps/alpha/ldiv.S b/sysdeps/alpha/ldiv.S index c90edfb..3909672 100644 --- a/sysdeps/alpha/ldiv.S +++ b/sysdeps/alpha/ldiv.S @@ -53,6 +53,8 @@ ldiv: #endif beq Y, $divbyzero + excb + mf_fpcr $f10 _ITOFT2 X, $f0, 0, Y, $f1, 8 @@ -71,6 +73,8 @@ ldiv: /* If we get here, we're expecting exact results from the division. Do nothing else besides convert and clean up. */ cvttq/c $f0, $f0 + excb + mt_fpcr $f10 _FTOIT $f0, $0, 0 $egress: @@ -107,9 +111,10 @@ $fix_sign_in_ret1: cvttq/c $f0, $f0 _FTOIT $f0, Q, 8 - .align 3 $fix_sign_in_ret2: mulq Q, Y, QY + excb + mt_fpcr $f10 .align 4 subq QY, X, R diff --git a/sysdeps/alpha/reml.S b/sysdeps/alpha/reml.S index 1bbb978..bfc3be5 100644 --- a/sysdeps/alpha/reml.S +++ b/sysdeps/alpha/reml.S @@ -24,7 +24,12 @@ be clobbered. The FPU can handle the division for all input values except zero. - All we have to do is compute the remainder via multiply-and-subtract. */ + All we have to do is compute the remainder via multiply-and-subtract. + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. 
*/ #ifndef EXTEND #define EXTEND(S,D) sextl S, D @@ -43,26 +48,35 @@ __reml: cfi_def_cfa_offset (FRAME) CALL_MCOUNT stt $f0, 0(sp) - stt $f1, 8(sp) + excb beq Y, DIVBYZERO + + stt $f1, 8(sp) + stt $f2, 16(sp) cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f2, 16) + mf_fpcr $f2 EXTEND (X, RV) EXTEND (Y, AT) - _ITOFT2 RV, $f0, 16, AT, $f1, 24 + _ITOFT2 RV, $f0, 24, AT, $f1, 32 cvtqt $f0, $f0 cvtqt $f1, $f1 divt/c $f0, $f1, $f0 cvttq/c $f0, $f0 - _FTOIT $f0, RV, 16 + excb + mt_fpcr $f2 + _FTOIT $f0, RV, 24 ldt $f0, 0(sp) mull RV, Y, RV ldt $f1, 8(sp) + ldt $f2, 16(sp) lda sp, FRAME(sp) cfi_restore ($f0) cfi_restore ($f1) + cfi_restore ($f2) cfi_def_cfa_offset (0) subl X, RV, RV ret $31, (RA), 1 diff --git a/sysdeps/alpha/remq.S b/sysdeps/alpha/remq.S index 40c68d7..645a834 100644 --- a/sysdeps/alpha/remq.S +++ b/sysdeps/alpha/remq.S @@ -33,7 +33,12 @@ When the dividend is outside the range for which we can compute exact results, we use the fp quotent as an estimate from which we begin refining an exact integral value. This reduces the number of iterations in the - shift-and-subtract loop significantly. */ + shift-and-subtract loop significantly. + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. */ .text .align 4 @@ -53,10 +58,15 @@ __remq: ready -- all the time in the world to figure out how we're going to use the results. */ stt $f0, 0(sp) - stt $f1, 8(sp) + excb beq Y, DIVBYZERO + + stt $f1, 8(sp) + stt $f3, 48(sp) cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f3, 48) + mf_fpcr $f3 _ITOFT2 X, $f0, 16, Y, $f1, 24 cvtqt $f0, $f0 @@ -73,12 +83,16 @@ __remq: /* If we get here, we're expecting exact results from the division. Do nothing else besides convert, compute remainder, clean up. */ cvttq/c $f0, $f0 + excb + mt_fpcr $f3 _FTOIT $f0, AT, 16 mulq AT, Y, AT ldt $f0, 0(sp) + ldt $f3, 48(sp) cfi_restore ($f1) cfi_remember_state cfi_restore ($f0) + cfi_restore ($f3) cfi_def_cfa_offset (0) lda sp, FRAME(sp) subq X, AT, RV @@ -122,9 +136,9 @@ $fix_sign_in_ret2: cfi_rel_offset (t3, 0) mulq Q, Y, QY - unop + excb stq t4, 8(sp) - unop + mt_fpcr $f3 cfi_rel_offset (t4, 8) subq QY, X, R @@ -148,6 +162,7 @@ $fix_sign_out_ret: ldq t3, 0(sp) ldq t4, 8(sp) ldq t5, 40(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_remember_state cfi_restore (t0) @@ -156,6 +171,7 @@ $fix_sign_out_ret: cfi_restore (t3) cfi_restore (t4) cfi_restore (t5) + cfi_restore ($f3) cfi_def_cfa_offset (0) ret $31, (RA), 1 diff --git a/sysdeps/alpha/remqu.S b/sysdeps/alpha/remqu.S index f8deebb..bfa78df 100644 --- a/sysdeps/alpha/remqu.S +++ b/sysdeps/alpha/remqu.S @@ -33,7 +33,12 @@ When the dividend is outside the range for which we can compute exact results, we use the fp quotent as an estimate from which we begin refining an exact integral value. This reduces the number of iterations in the - shift-and-subtract loop significantly. */ + shift-and-subtract loop significantly. + + The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE + for cvttq/c even without /sui being set. It will not, however, properly + raise the exception, so we don't have to worry about FPCR_INED being clear + and so dying by SIGFPE. 
*/ .text .align 4 @@ -57,11 +62,15 @@ __remqu: and Y, AT, AT stt $f1, 8(sp) + excb + stt $f3, 48(sp) beq AT, $powerof2 cfi_rel_offset ($f0, 0) cfi_rel_offset ($f1, 8) + cfi_rel_offset ($f3, 48) _ITOFT2 X, $f0, 16, Y, $f1, 24 + mf_fpcr $f3 cvtqt $f0, $f0 cvtqt $f1, $f1 @@ -79,14 +88,18 @@ __remqu: /* If we get here, we're expecting exact results from the division. Do nothing else besides convert, compute remainder, clean up. */ cvttq/c $f0, $f0 + excb + mt_fpcr $f3 _FTOIT $f0, AT, 16 mulq AT, Y, AT ldt $f0, 0(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_remember_state cfi_restore ($f0) cfi_restore ($f1) + cfi_restore ($f3) cfi_def_cfa_offset (0) .align 4 @@ -144,9 +157,9 @@ $x_big: .align 4 stq t4, 8(sp) - unop + excb ldt $f0, 0(sp) - unop + mt_fpcr $f3 cfi_rel_offset (t4, 8) cfi_restore ($f0) @@ -168,6 +181,7 @@ $q_low_ret: ldq t2, 32(sp) ldq t3, 40(sp) + ldt $f3, 48(sp) lda sp, FRAME(sp) cfi_remember_state cfi_restore (t0) @@ -175,6 +189,7 @@ $q_low_ret: cfi_restore (t2) cfi_restore (t3) cfi_restore (t4) + cfi_restore ($f3) cfi_def_cfa_offset (0) ret $31, (RA), 1 -- cgit v1.1
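
Editorial note on the FPCR save/restore above: the mf_fpcr/mt_fpcr pairs snapshot the Alpha floating-point control register before the cvttq/c conversion and write the snapshot back afterwards, so the FPCR_INE status bit that EV6 sets for the inexact conversion is discarded rather than left sticky for the caller; the excb instructions added alongside them order the FPCR access against the surrounding floating-point operations. As a rough, purely illustrative analogue (not part of the patch, and the function name below is hypothetical), the same save/convert/restore shape can be written with the C99 <fenv.h> environment calls; whether a plain cast actually raises FE_INEXACT is implementation-specific, as noted in the comments:

    #include <fenv.h>
    #include <stdio.h>

    /* Tell the compiler we inspect/modify the FP environment (C99).  */
    #pragma STDC FENV_ACCESS ON

    /* Hypothetical illustration only: save the FP environment, perform a
       conversion that may set the inexact flag, then restore the saved
       environment so the flag is discarded -- the same shape as the
       mf_fpcr ... mt_fpcr sequences in the patch.  */
    static long
    convert_discarding_inexact (double x)
    {
      fenv_t saved;
      long q;

      fegetenv (&saved);   /* like mf_fpcr: snapshot control and status.  */
      q = (long) x;        /* may raise FE_INEXACT on some targets, the way
                              cvttq/c sets FPCR_INE on EV6.  */
      fesetenv (&saved);   /* like mt_fpcr: put the snapshot back, dropping
                              anything set in between.  */
      return q;
    }

    int
    main (void)
    {
      printf ("%ld\n", convert_discarding_inexact (7.0 / 3.0));
      return 0;
    }

The assembly accomplishes the same effect with just the two FPCR moves into and out of a saved floating-point register, which is also why FRAME grows to 64: the routines need extra stack slots for the additional saved register ($f2 or $f3) and for the wider _ITOFT2/_FTOIT scratch offsets.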