diff options
136 files changed, 6747 insertions, 2 deletions
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..54ac075 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +#========================================================================= +# Git Ignore Files +#========================================================================= +# We explicitly ignore a default build directory and the autoconf +# generated autom4te.cache directory. This makes it easy to do a clean +# build under the source directory and still have it appear clean to git. + +build/ +autom4te.cache/ + diff --git a/softfloat b/softfloat deleted file mode 120000 index fe7fce9..0000000 --- a/softfloat +++ /dev/null @@ -1 +0,0 @@ -../sim/softfloat/
\ No newline at end of file diff --git a/softfloat/8086/OLD-specialize.c b/softfloat/8086/OLD-specialize.c new file mode 100755 index 0000000..ffb306d --- /dev/null +++ b/softfloat/8086/OLD-specialize.c @@ -0,0 +1,40 @@ +
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/*----------------------------------------------------------------------------
+| Underflow tininess-detection mode, statically initialized to default value.
+| (The declaration in `softfloat.h' must match the `bool' type here.)
+*----------------------------------------------------------------------------*/
+bool float_detectTininess = float_tininess_afterRounding;
+
diff --git a/softfloat/8086/OLD-specialize.h b/softfloat/8086/OLD-specialize.h new file mode 100755 index 0000000..9e4461c --- /dev/null +++ b/softfloat/8086/OLD-specialize.h @@ -0,0 +1,379 @@ +
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/*----------------------------------------------------------------------------
+| Internal canonical NaN format.
+*----------------------------------------------------------------------------*/
+*** COMMON
+typedef struct {
+ flag sign;
+ uint128_t bits;
+} commonNaNT;
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define float32Bits_defaultNaN 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+*** COMMON
+#define softfloat_isNaNFloat32Bits( a ) ( 0xFF000000 < (uint32_t) ( a )<<1 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+inline bool softfloat_isSigNaNFloat32Bits( uint32_t a )
+ { return ( ( a>>22 & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); }
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+commonNaNT softfloat_NaNFromFloat32Bits( uint32_t );
+uint32_t softfloat_float32BitsFromNaN( commonNaNT );
+uint32_t softfloat_propNaNFloat32Bits( uint32_t, uint32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define float64Bits_defaultNaN 0xFFF8000000000000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+*** COMMON
+#define softfloat_isNaNFloat64Bits( a ) ( 0xFFE0000000000000 < (uint64_t) ( a )<<1 )
+
+
+
+
+
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float64_is_signaling_nan( float64 a )
+{
+
+ return
+ ( ( ( a>>51 ) & 0xFFF ) == 0xFFE )
+ && ( a & LIT64( 0x0007FFFFFFFFFFFF ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+
+static commonNaNT float64ToCommonNaN( float64 a )
+{
+ commonNaNT z;
+
+ if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+ z.sign = a>>63;
+ z.low = 0;
+ z.high = a<<12;
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the double-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static float64 commonNaNToFloat64( commonNaNT a )
+{
+
+ return
+ ( ( (bits64) a.sign )<<63 )
+ | LIT64( 0x7FF8000000000000 )
+ | ( a.high>>12 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static float64 propagateFloat64NaN( float64 a, float64 b )
+{
+ flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+ aIsNaN = float64_is_nan( a );
+ aIsSignalingNaN = float64_is_signaling_nan( a );
+ bIsNaN = float64_is_nan( b );
+ bIsSignalingNaN = float64_is_signaling_nan( b );
+ a |= LIT64( 0x0008000000000000 );
+ b |= LIT64( 0x0008000000000000 );
+ if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+ if ( aIsSignalingNaN ) {
+ if ( bIsSignalingNaN ) goto returnLargerSignificand;
+ return bIsNaN ? b : a;
+ }
+ else if ( aIsNaN ) {
+ if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+ if ( (bits64) ( a<<1 ) < (bits64) ( b<<1 ) ) return b;
+ if ( (bits64) ( b<<1 ) < (bits64) ( a<<1 ) ) return a;
+ return ( a < b ) ? a : b;
+ }
+ else {
+ return b;
+ }
+
+}
+
+#ifdef FLOATX80
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated extended double-precision NaN. The
+| `high' and `low' values hold the most- and least-significant bits,
+| respectively.
+*----------------------------------------------------------------------------*/
+#define floatx80_default_nan_high 0xFFFF
+#define floatx80_default_nan_low LIT64( 0xC000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the extended double-precision floating-point value `a' is a
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag floatx80_is_nan( floatx80 a )
+{
+
+ return ( ( a.high & 0x7FFF ) == 0x7FFF ) && (bits64) ( a.low<<1 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the extended double-precision floating-point value `a' is a
+| signaling NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag floatx80_is_signaling_nan( floatx80 a )
+{
+ bits64 aLow;
+
+ aLow = a.low & ~ LIT64( 0x4000000000000000 );
+ return
+ ( ( a.high & 0x7FFF ) == 0x7FFF )
+ && (bits64) ( aLow<<1 )
+ && ( a.low == aLow );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the extended double-precision floating-
+| point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the
+| invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static commonNaNT floatx80ToCommonNaN( floatx80 a )
+{
+ commonNaNT z;
+
+ if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+ z.sign = a.high>>15;
+ z.low = 0;
+ z.high = a.low<<1;
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the extended
+| double-precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static floatx80 commonNaNToFloatx80( commonNaNT a )
+{
+ floatx80 z;
+
+ z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 );
+ z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF;
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two extended double-precision floating-point values `a' and `b', one
+| of which is a NaN, and returns the appropriate NaN result. If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b )
+{
+ flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+ aIsNaN = floatx80_is_nan( a );
+ aIsSignalingNaN = floatx80_is_signaling_nan( a );
+ bIsNaN = floatx80_is_nan( b );
+ bIsSignalingNaN = floatx80_is_signaling_nan( b );
+ a.low |= LIT64( 0xC000000000000000 );
+ b.low |= LIT64( 0xC000000000000000 );
+ if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+ if ( aIsSignalingNaN ) {
+ if ( bIsSignalingNaN ) goto returnLargerSignificand;
+ return bIsNaN ? b : a;
+ }
+ else if ( aIsNaN ) {
+ if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+ if ( a.low < b.low ) return b;
+ if ( b.low < a.low ) return a;
+ return ( a.high < b.high ) ? a : b;
+ }
+ else {
+ return b;
+ }
+
+}
+
+#endif
+
+#ifdef FLOAT128
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated quadruple-precision NaN. The `high' and
+| `low' values hold the most- and least-significant bits, respectively.
+*----------------------------------------------------------------------------*/
+#define float128_default_nan_high LIT64( 0xFFFF800000000000 )
+#define float128_default_nan_low LIT64( 0x0000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the quadruple-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float128_is_nan( float128 a )
+{
+
+ return
+ ( LIT64( 0xFFFE000000000000 ) <= (bits64) ( a.high<<1 ) )
+ && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the quadruple-precision floating-point value `a' is a
+| signaling NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+
+flag float128_is_signaling_nan( float128 a )
+{
+
+ return
+ ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE )
+ && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) );
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the quadruple-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+
+static commonNaNT float128ToCommonNaN( float128 a )
+{
+ commonNaNT z;
+
+ if ( float128_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
+ z.sign = a.high>>63;
+ shortShift128Left( a.high, a.low, 16, &z.high, &z.low );
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the quadruple-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+static float128 commonNaNToFloat128( commonNaNT a )
+{
+ float128 z;
+
+ shift128Right( a.high, a.low, 16, &z.high, &z.low );
+ z.high |= ( ( (bits64) a.sign )<<63 ) | LIT64( 0x7FFF800000000000 );
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes two quadruple-precision floating-point values `a' and `b', one of
+| which is a NaN, and returns the appropriate NaN result. If either `a' or
+| `b' is a signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+static float128 propagateFloat128NaN( float128 a, float128 b )
+{
+ flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
+
+ aIsNaN = float128_is_nan( a );
+ aIsSignalingNaN = float128_is_signaling_nan( a );
+ bIsNaN = float128_is_nan( b );
+ bIsSignalingNaN = float128_is_signaling_nan( b );
+ a.high |= LIT64( 0x0000800000000000 );
+ b.high |= LIT64( 0x0000800000000000 );
+ if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
+ if ( aIsSignalingNaN ) {
+ if ( bIsSignalingNaN ) goto returnLargerSignificand;
+ return bIsNaN ? b : a;
+ }
+ else if ( aIsNaN ) {
+ if ( bIsSignalingNaN | ! bIsNaN ) return a;
+ returnLargerSignificand:
+ if ( lt128( a.high<<1, a.low, b.high<<1, b.low ) ) return b;
+ if ( lt128( b.high<<1, b.low, a.high<<1, a.low ) ) return a;
+ return ( a.high < b.high ) ? a : b;
+ }
+ else {
+ return b;
+ }
+
+}
+
+#endif
+
diff --git a/softfloat/8086/platform.h b/softfloat/8086/platform.h new file mode 100755 index 0000000..9355edf --- /dev/null +++ b/softfloat/8086/platform.h @@ -0,0 +1,38 @@ + +/*============================================================================ + +*** FIX. + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. 
+ +=============================================================================*/ + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define LITTLEENDIAN + diff --git a/softfloat/8086/s_commonNaNToF32UI.c b/softfloat/8086/s_commonNaNToF32UI.c new file mode 100755 index 0000000..3b96c41 --- /dev/null +++ b/softfloat/8086/s_commonNaNToF32UI.c @@ -0,0 +1,17 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the single-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+{
+
+ return (uint_fast32_t) a.sign<<31 | 0x7FC00000 | a.v64>>41;
+
+}
+
diff --git a/softfloat/8086/s_commonNaNToF64UI.c b/softfloat/8086/s_commonNaNToF64UI.c new file mode 100755 index 0000000..474ceee --- /dev/null +++ b/softfloat/8086/s_commonNaNToF64UI.c @@ -0,0 +1,19 @@ + +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the double- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN a ) +{ + + return + (uint_fast64_t) a.sign<<63 | UINT64_C( 0x7FF8000000000000 ) + | a.v64>>12; + +} + diff --git a/softfloat/8086/s_f32UIToCommonNaN.c b/softfloat/8086/s_f32UIToCommonNaN.c new file mode 100755 index 0000000..067e8da --- /dev/null +++ b/softfloat/8086/s_f32UIToCommonNaN.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t uiA )
+{
+ struct commonNaN z;
+
+ if ( softfloat_isSigNaNF32UI( uiA ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ z.sign = uiA>>31;
+ z.v64 = (uint_fast64_t) uiA<<41;
+ z.v0 = 0;
+ return z;
+
+}
+
diff --git a/softfloat/8086/s_f64UIToCommonNaN.c b/softfloat/8086/s_f64UIToCommonNaN.c new file mode 100755 index 0000000..f933ded --- /dev/null +++ b/softfloat/8086/s_f64UIToCommonNaN.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t uiA )
+{
+ struct commonNaN z;
+
+ if ( softfloat_isSigNaNF64UI( uiA ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ z.sign = uiA>>63;
+ z.v64 = uiA<<12;
+ z.v0 = 0;
+ return z;
+
+}
+
diff --git a/softfloat/8086/s_isSigNaNF32UI.c b/softfloat/8086/s_isSigNaNF32UI.c new file mode 100755 index 0000000..0a9c33f --- /dev/null +++ b/softfloat/8086/s_isSigNaNF32UI.c @@ -0,0 +1,13 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +bool softfloat_isSigNaNF32UI( uint_fast32_t ui ) +{ + + return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); + +} + diff --git a/softfloat/8086/s_isSigNaNF64UI.c b/softfloat/8086/s_isSigNaNF64UI.c new file mode 100755 index 0000000..d255213 --- /dev/null +++ b/softfloat/8086/s_isSigNaNF64UI.c @@ -0,0 +1,15 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +bool softfloat_isSigNaNF64UI( uint_fast64_t ui ) +{ + + return + ( ( ui>>51 & 0xFFF ) == 0xFFE ) + && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) ); + +} + diff --git a/softfloat/8086/s_propagateNaNF32UI.c b/softfloat/8086/s_propagateNaNF32UI.c new file mode 100755 index 0000000..07774e8 --- /dev/null +++ b/softfloat/8086/s_propagateNaNF32UI.c @@ -0,0 +1,55 @@ +
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+ bool isNaNA, isSigNaNA, isNaNB, isSigNaNB;
+ uint_fast32_t uiMagA, uiMagB;
+
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ isNaNA = isNaNF32UI( uiA );
+ isSigNaNA = softfloat_isSigNaNF32UI( uiA );
+ isNaNB = isNaNF32UI( uiB );
+ isSigNaNB = softfloat_isSigNaNF32UI( uiB );
+ /*------------------------------------------------------------------------
+ | Make NaNs non-signaling.
+ *------------------------------------------------------------------------*/
+ uiA |= 0x00400000;
+ uiB |= 0x00400000;
+ /*------------------------------------------------------------------------
+ *------------------------------------------------------------------------*/
+ if ( isSigNaNA | isSigNaNB ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ if ( isSigNaNA ) {
+ if ( isSigNaNB ) goto returnLargerSignificand;
+ return isNaNB ? uiB : uiA;
+ } else if ( isNaNA ) {
+ if ( isSigNaNB || ! isNaNB ) return uiA;
+ returnLargerSignificand:
+ uiMagA = uiA<<1;
+ uiMagB = uiB<<1;
+ if ( uiMagA < uiMagB ) return uiB;
+ if ( uiMagB < uiMagA ) return uiA;
+ return ( uiA < uiB ) ? uiA : uiB;
+ } else {
+ return uiB;
+ }
+
+}
+
diff --git a/softfloat/8086/s_propagateNaNF64UI.c b/softfloat/8086/s_propagateNaNF64UI.c new file mode 100755 index 0000000..0ff6446 --- /dev/null +++ b/softfloat/8086/s_propagateNaNF64UI.c @@ -0,0 +1,55 @@ + +/*** UPDATE COMMENTS. ***/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Takes two double-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ) +{ + bool isNaNA, isSigNaNA, isNaNB, isSigNaNB; + uint_fast64_t uiMagA, uiMagB; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + isNaNA = isNaNF64UI( uiA ); + isSigNaNA = softfloat_isSigNaNF64UI( uiA ); + isNaNB = isNaNF64UI( uiB ); + isSigNaNB = softfloat_isSigNaNF64UI( uiB ); + /*------------------------------------------------------------------------ + | Make NaNs non-signaling. + *------------------------------------------------------------------------*/ + uiA |= UINT64_C( 0x0008000000000000 ); + uiB |= UINT64_C( 0x0008000000000000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( isSigNaNA | isSigNaNB ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + if ( isSigNaNA ) { + if ( isSigNaNB ) goto returnLargerSignificand; + return isNaNB ? uiB : uiA; + } else if ( isNaNA ) { + if ( isSigNaNB || ! 
isNaNB ) return uiA; + returnLargerSignificand: + uiMagA = uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF ); + uiMagB = uiB & UINT64_C( 0x7FFFFFFFFFFFFFFF ); + if ( uiMagA < uiMagB ) return uiB; + if ( uiMagB < uiMagA ) return uiA; + return ( uiA < uiB ) ? uiA : uiB; + } else { + return uiB; + } + +} + diff --git a/softfloat/8086/softfloat_raiseFlags.c b/softfloat/8086/softfloat_raiseFlags.c new file mode 100755 index 0000000..c0c0dc8 --- /dev/null +++ b/softfloat/8086/softfloat_raiseFlags.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +*** FIX. + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. 
+ +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +#include "platform.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `float_exception_flags |= flags;'. +*----------------------------------------------------------------------------*/ + +void softfloat_raiseFlags( int_fast8_t flags ) +{ + + softfloat_exceptionFlags |= flags; + +} + diff --git a/softfloat/8086/softfloat_types.h b/softfloat/8086/softfloat_types.h new file mode 100755 index 0000000..b5c1828 --- /dev/null +++ b/softfloat/8086/softfloat_types.h @@ -0,0 +1,16 @@ + +#ifndef softfloat_types_h +#define softfloat_types_h + +/*** COMMENTS. ***/ + +#include <stdbool.h> +#include <stdint.h> + +typedef struct { uint32_t v; } float32_t; +typedef struct { uint64_t v; } float64_t; +typedef struct { uint64_t v; uint16_t x; } floatx80_t; +typedef struct { uint64_t v[ 2 ]; } float128_t; + +#endif + diff --git a/softfloat/8086/specialize.h b/softfloat/8086/specialize.h new file mode 100755 index 0000000..ca0bb1d --- /dev/null +++ b/softfloat/8086/specialize.h @@ -0,0 +1,113 @@ +
+/*============================================================================
+
+Specialization header: 8086-style NaN definitions and conversions.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+/* NOTE(review): this header has no include guard; confirm it is only ever
+   included once per translation unit. */
+#include <stdbool.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+| Default value used to initialize the tininess-detection mode.
+| NOTE(review): the trailing `;' below is expanded along with the macro;
+| confirm every use site expects that.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_afterRounding;
+
+/*----------------------------------------------------------------------------
+| Structure used to transfer NaN representations from one format to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+ bool sign;
+ uint64_t v64, v0;
+};
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+ { return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); }
+#else
+bool softfloat_isSigNaNF32UI( uint_fast32_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Converts a single-precision NaN bit pattern to/from the canonical
+| `commonNaN' representation declared above.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t );
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+ { return (uint_fast32_t) a.sign<<31 | 0x7FC00000 | a.v64>>41; }
+#else
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN );
+#endif
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_propagateNaNF32UI( uint_fast32_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C(0xFFF8000000000000)
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value whose bits are `ui'
+| is a signaling NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+ return
+ ( ( ui>>51 & 0xFFF ) == 0xFFE )
+ && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+}
+#else
+bool softfloat_isSigNaNF64UI( uint_fast64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+| Converts a double-precision NaN bit pattern to/from the canonical
+| `commonNaN' representation declared above.
+*----------------------------------------------------------------------------*/
+/* Could be given INLINE definitions like the 32-bit versions above. */
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t );
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN );
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_propagateNaNF64UI( uint_fast64_t, uint_fast64_t );
+
diff --git a/softfloat/f32_add.c b/softfloat/f32_add.c new file mode 100755 index 0000000..dc53d68 --- /dev/null +++ b/softfloat/f32_add.c @@ -0,0 +1,29 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_add( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + float32_t ( *magsRoutine )( uint_fast32_t, uint_fast32_t, bool ); + + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + magsRoutine = + ( signA == signB ) ? softfloat_addMagsF32 : softfloat_subMagsF32; + return magsRoutine( uiA, uiB, signA ); + +} + diff --git a/softfloat/f32_div.c b/softfloat/f32_div.c new file mode 100755 index 0000000..958b140 --- /dev/null +++ b/softfloat/f32_div.c @@ -0,0 +1,96 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_div( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + if ( ! expB ) { + if ( ! sigB ) { + if ( ! 
( expA | sigA ) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinity ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + expZ = expA - expB + 0x7D; + sigA = ( sigA | 0x00800000 )<<7; + sigB = ( sigB | 0x00800000 )<<8; + if ( sigB <= ( sigA + sigA ) ) { + ++expZ; + sigA >>= 1; + } + sigZ = ( (uint_fast64_t) sigA<<32 ) / sigB; + if ( ! ( sigZ & 0x3F ) ) { + sigZ |= ( (uint_fast64_t) sigB * sigZ != (uint_fast64_t) sigA<<32 ); + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + infinity: + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f32_eq.c b/softfloat/f32_eq.c new file mode 100755 index 0000000..8f2306b --- /dev/null +++ b/softfloat/f32_eq.c @@ -0,0 +1,34 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_eq( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return ( uiA == uiB ) || ! 
(uint32_t) ( ( uiA | uiB )<<1 ); + +} + diff --git a/softfloat/f32_eq_signaling.c b/softfloat/f32_eq_signaling.c new file mode 100755 index 0000000..bfba48a --- /dev/null +++ b/softfloat/f32_eq_signaling.c @@ -0,0 +1,29 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_eq_signaling( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return ( uiA == uiB ) || ! (uint32_t) ( ( uiA | uiB )<<1 ); + +} + diff --git a/softfloat/f32_isSignalingNaN.c b/softfloat/f32_isSignalingNaN.c new file mode 100755 index 0000000..09aaa82 --- /dev/null +++ b/softfloat/f32_isSignalingNaN.c @@ -0,0 +1,16 @@ + +#include <stdbool.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_isSignalingNaN( float32_t a ) +{ + union ui32_f32 uA; + + uA.f = a; + return softfloat_isSigNaNF32UI( uA.ui ); + +} + diff --git a/softfloat/f32_le.c b/softfloat/f32_le.c new file mode 100755 index 0000000..5f47be5 --- /dev/null +++ b/softfloat/f32_le.c @@ -0,0 +1,34 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_le( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + ( signA != signB ) ? 
signA || ! (uint32_t) ( ( uiA | uiB )<<1 ) + : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f32_le_quiet.c b/softfloat/f32_le_quiet.c new file mode 100755 index 0000000..2b541da --- /dev/null +++ b/softfloat/f32_le_quiet.c @@ -0,0 +1,39 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_le_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + ( signA != signB ) ? signA || ! (uint32_t) ( ( uiA | uiB )<<1 ) + : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f32_lt.c b/softfloat/f32_lt.c new file mode 100755 index 0000000..753b28a --- /dev/null +++ b/softfloat/f32_lt.c @@ -0,0 +1,34 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_lt( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + ( signA != signB ) ? 
signA && ( (uint32_t) ( ( uiA | uiB )<<1 ) != 0 ) + : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f32_lt_quiet.c b/softfloat/f32_lt_quiet.c new file mode 100755 index 0000000..ecd90bf --- /dev/null +++ b/softfloat/f32_lt_quiet.c @@ -0,0 +1,39 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_lt_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF32UI( uiA ) == 0xFF ) && fracF32UI( uiA ) ) + || ( ( expF32UI( uiB ) == 0xFF ) && fracF32UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + ( signA != signB ) ? signA && ( (uint32_t) ( ( uiA | uiB )<<1 ) != 0 ) + : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f32_mul.c b/softfloat/f32_mul.c new file mode 100755 index 0000000..d49c1dd --- /dev/null +++ b/softfloat/f32_mul.c @@ -0,0 +1,89 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the product of the single-precision values `a' and `b'.  NaN
+| operands are propagated; infinity times zero raises the invalid exception
+| and returns the default NaN; subnormal operands are normalized before the
+| significand multiply.
+*----------------------------------------------------------------------------*/
+float32_t f32_mul( float32_t a, float32_t b )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool signA;
+ int_fast16_t expA;
+ uint_fast32_t sigA;
+ union ui32_f32 uB;
+ uint_fast32_t uiB;
+ bool signB;
+ int_fast16_t expB;
+ uint_fast32_t sigB;
+ bool signZ;
+ uint_fast32_t magBits;
+ struct exp16_sig32 normExpSig;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ, uiZ;
+ union ui32_f32 uZ;
+
+ /* Unpack both operands into sign, biased exponent, and fraction fields. */
+ uA.f = a;
+ uiA = uA.ui;
+ signA = signF32UI( uiA );
+ expA = expF32UI( uiA );
+ sigA = fracF32UI( uiA );
+ uB.f = b;
+ uiB = uB.ui;
+ signB = signF32UI( uiB );
+ expB = expF32UI( uiB );
+ sigB = fracF32UI( uiB );
+ signZ = signA ^ signB;
+ /* Special cases: NaN and infinity operands.  `magBits' records whether
+    the other operand is zero (infinity * 0 is invalid). */
+ if ( expA == 0xFF ) {
+ if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN;
+ magBits = expB | sigB;
+ goto infArg;
+ }
+ if ( expB == 0xFF ) {
+ if ( sigB ) goto propagateNaN;
+ magBits = expA | sigA;
+ goto infArg;
+ }
+ if ( ! expA ) {
+ if ( ! sigA ) goto zero;
+ normExpSig = softfloat_normSubnormalF32Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ if ( ! expB ) {
+ if ( ! sigB ) goto zero;
+ normExpSig = softfloat_normSubnormalF32Sig( sigB );
+ expB = normExpSig.exp;
+ sigB = normExpSig.sig;
+ }
+ /* Both operands normal: multiply the 24-bit significands (with hidden
+    bit restored); subtracting 0x7F removes the doubled exponent bias. */
+ expZ = expA + expB - 0x7F;
+ sigA = ( sigA | 0x00800000 )<<7;
+ sigB = ( sigB | 0x00800000 )<<8;
+ sigZ = softfloat_shortShift64RightJam( (uint_fast64_t) sigA * sigB, 32 );
+ /* Renormalize if the product came out with its top bit one place low. */
+ if ( sigZ < 0x40000000 ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN:
+ uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ goto uiZ;
+ infArg:
+ if ( ! magBits ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF32UI;
+ } else {
+ uiZ = packToF32UI( signZ, 0xFF, 0 );
+ }
+ goto uiZ;
+ zero:
+ uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f32_mulAdd.c b/softfloat/f32_mulAdd.c new file mode 100755 index 0000000..3d4cee9 --- /dev/null +++ b/softfloat/f32_mulAdd.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the fused multiply-add ( a * b ) + c with a single rounding, by
+| delegating to softfloat_mulAddF32.  NOTE(review): the leading 0 argument
+| presumably selects the plain (non-negated) variant -- confirm against
+| softfloat_mulAddF32's definition.
+*----------------------------------------------------------------------------*/
+float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ union ui32_f32 uB;
+ uint_fast32_t uiB;
+ union ui32_f32 uC;
+ uint_fast32_t uiC;
+
+ /* Reinterpret the three float operands as raw bit patterns. */
+ uA.f = a;
+ uiA = uA.ui;
+ uB.f = b;
+ uiB = uB.ui;
+ uC.f = c;
+ uiC = uC.ui;
+ return softfloat_mulAddF32( 0, uiA, uiB, uiC );
+
+}
+
diff --git a/softfloat/f32_rem.c b/softfloat/f32_rem.c new file mode 100755 index 0000000..d29b840 --- /dev/null +++ b/softfloat/f32_rem.c @@ -0,0 +1,124 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_rem( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + struct exp16_sig32 normExpSig; + int_fast16_t expDiff; + uint_fast32_t q; + uint_fast64_t sigA64, sigB64, q64; + uint_fast32_t alternateSigA; + uint32_t sigMean; + bool signZ; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + if ( expA == 0xFF ) { + if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + return a; + } + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + expDiff = expA - expB; + sigA |= 0x00800000; + sigB |= 0x00800000; + if ( expDiff < 32 ) { + sigA <<= 8; + sigB <<= 8; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + sigA >>= 1; + } + q = ( sigB <= sigA ); + if ( q ) sigA -= sigB; + if ( 0 < expDiff ) { + q = ( (uint_fast64_t) sigA<<32 ) / sigB; + q >>= 32 - expDiff; + sigB >>= 2; + sigA = ( ( sigA>>1 )<<( expDiff - 1 ) ) - sigB * q; + } else { + sigA >>= 2; + sigB >>= 2; + } + } else { + if ( sigB <= sigA ) sigA -= sigB; + sigA64 = (uint_fast64_t) sigA<<40; + sigB64 = (uint_fast64_t) sigB<<40; + expDiff -= 64; + while ( 0 < expDiff ) { + q64 = softfloat_estimateDiv128To64( sigA64, 0, sigB64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + sigA64 = - ( ( sigB * q64 )<<38 ); + expDiff -= 62; + } + expDiff += 64; + q64 = softfloat_estimateDiv128To64( sigA64, 0, sigB64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + q = q64>>( 64 - expDiff ); + sigB <<= 6; + sigA = ( ( sigA64>>33 )<<( expDiff - 1 ) ) - sigB * q; + } + do { + alternateSigA = sigA; + ++q; + sigA -= sigB; + } while ( sigA < 0x80000000 ); + sigMean = sigA + alternateSigA; + if ( ( 0x80000000 <= sigMean ) || ( ! 
sigMean && ( q & 1 ) ) ) { + sigA = alternateSigA; + } + signZ = ( 0x80000000 <= sigA ); + if ( signZ ) sigA = - sigA; + return softfloat_normRoundPackToF32( signA ^ signZ, expB, sigA ); + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f32_roundToInt.c b/softfloat/f32_roundToInt.c new file mode 100755 index 0000000..f8f9114 --- /dev/null +++ b/softfloat/f32_roundToInt.c @@ -0,0 +1,78 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_roundToInt( float32_t a, int_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t expA; + uint_fast32_t uiZ; + bool signA; + uint_fast32_t lastBitMask, roundBitsMask; + union ui32_f32 uZ; + + uA.f = a; + uiA = uA.ui; + expA = expF32UI( uiA ); + if ( 0x96 <= expA ) { + if ( ( expA == 0xFF ) && fracF32UI( uiA ) ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + return a; + } + if ( expA <= 0x7E ) { + if ( ! (uint32_t) ( uiA<<1 ) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + signA = signF32UI( uiA ); + switch ( roundingMode ) { + case softfloat_round_nearest_even: + if ( ( expA == 0x7E ) && fracF32UI( uiA ) ) { + uiZ = packToF32UI( signA, 0x7F, 0 ); + goto uiZ; + } + break; + case softfloat_round_min: + uiZ = signA ? 0xBF800000 : 0; + goto uiZ; + case softfloat_round_max: + uiZ = signA ? 
0x80000000 : 0x3F800000; + goto uiZ; + case softfloat_round_nearest_maxMag: + if ( expA == 0x7E ) { + uiZ = packToF32UI( signA, 0x7F, 0 ); + goto uiZ; + } + break; + } + uiZ = packToF32UI( signA, 0, 0 ); + goto uiZ; + } + lastBitMask = (uint_fast32_t) 1<<( 0x96 - expA ); + roundBitsMask = lastBitMask - 1; + uiZ = uiA; + if ( roundingMode == softfloat_round_nearest_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_nearest_even ) { + uiZ += lastBitMask>>1; + if ( ! ( uiZ & roundBitsMask ) ) uiZ &= ~ lastBitMask; + } else if ( roundingMode != softfloat_round_minMag ) { + if ( signF32UI( uiZ ) ^ ( roundingMode == softfloat_round_max ) ) { + uiZ += roundBitsMask; + } + } + uiZ &= ~ roundBitsMask; + if ( exact && ( uiZ != uiA ) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f32_sqrt.c b/softfloat/f32_sqrt.c new file mode 100755 index 0000000..c9eb907 --- /dev/null +++ b/softfloat/f32_sqrt.c @@ -0,0 +1,74 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the square root of the single-precision value `a'.  NaN operands
+| propagate; the square root of a negative nonzero value (including -inf)
+| raises the invalid exception and returns the default NaN; sqrt(+/-0) and
+| sqrt(+inf) return the argument unchanged.
+*----------------------------------------------------------------------------*/
+float32_t f32_sqrt( float32_t a )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool signA;
+ int_fast16_t expA;
+ uint_fast32_t sigA, uiZ;
+ struct exp16_sig32 normExpSig;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ;
+ uint_fast64_t term, rem;
+ union ui32_f32 uZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ signA = signF32UI( uiA );
+ expA = expF32UI( uiA );
+ sigA = fracF32UI( uiA );
+ if ( expA == 0xFF ) {
+ if ( sigA ) {
+ uiZ = softfloat_propagateNaNF32UI( uiA, 0 );
+ goto uiZ;
+ }
+ if ( ! signA ) return a;
+ goto invalid;
+ }
+ if ( signA ) {
+ if ( ! ( expA | sigA ) ) return a;
+ goto invalid;
+ }
+ if ( ! expA ) {
+ if ( ! sigA ) return a;
+ normExpSig = softfloat_normSubnormalF32Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ /* Result exponent is half the unbiased input exponent, rebiased. */
+ expZ = ( ( expA - 0x7F )>>1 ) + 0x7E;
+ sigA = ( sigA | 0x00800000 )<<8;
+ sigZ = softfloat_estimateSqrt32( expA, sigA ) + 2;
+ /* If the estimate is close to a rounding boundary, refine it exactly. */
+ if ( ( sigZ & 0x7F ) <= 5 ) {
+ if ( sigZ < 2 ) {
+ sigZ = 0x7FFFFFFF;
+ goto roundPack;
+ }
+ sigA >>= expA & 1;
+ term = (uint_fast64_t) sigZ * sigZ;
+ rem = ( (uint_fast64_t) sigA<<32 ) - term;
+ /* While the remainder is negative (top bit set as unsigned), the
+    estimate is too high; step it down, adjusting rem by 2*sigZ + 1. */
+ while ( UINT64_C( 0x8000000000000000 ) <= rem ) {
+ --sigZ;
+ rem += ( (uint_fast64_t) sigZ<<1 ) | 1;
+ }
+ /* Jam a sticky bit so a nonzero remainder rounds as inexact. */
+ sigZ |= ( rem != 0 );
+ }
+ sigZ = softfloat_shortShift32Right1Jam( sigZ );
+ roundPack:
+ return softfloat_roundPackToF32( 0, expZ, sigZ );
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF32UI;
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f32_sub.c b/softfloat/f32_sub.c new file mode 100755 index 0000000..c64df8e --- /dev/null +++ b/softfloat/f32_sub.c @@ -0,0 +1,29 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the difference a - b of the single-precision values.  Implemented
+| by flipping `b's sign bit and dispatching: equal signs become a magnitude
+| subtraction, differing signs a magnitude addition (the mirror image of
+| f32_add's dispatch).
+*----------------------------------------------------------------------------*/
+float32_t f32_sub( float32_t a, float32_t b )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool signA;
+ union ui32_f32 uB;
+ uint_fast32_t uiB;
+ bool signB;
+ float32_t ( *magsRoutine )( uint_fast32_t, uint_fast32_t, bool );
+
+ uA.f = a;
+ uiA = uA.ui;
+ signA = signF32UI( uiA );
+ uB.f = b;
+ uiB = uB.ui;
+ signB = signF32UI( uiB );
+ magsRoutine =
+ ( signA == signB ) ? softfloat_subMagsF32 : softfloat_addMagsF32;
+ /* XOR with 0x80000000 negates `b' before the magnitude routine runs. */
+ return magsRoutine( uiA, uiB ^ 0x80000000, signA );
+
+}
+
diff --git a/softfloat/f32_to_f64.c b/softfloat/f32_to_f64.c new file mode 100755 index 0000000..9f0ae5c --- /dev/null +++ b/softfloat/f32_to_f64.c @@ -0,0 +1,47 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to double precision.  The
+| conversion is exact: NaNs go through the common-NaN format, infinities and
+| zeros map directly, and subnormals are normalized first.
+*----------------------------------------------------------------------------*/
+float64_t f32_to_f64( float32_t a )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ uint_fast64_t uiZ;
+ struct exp16_sig32 normExpSig;
+ union ui64_f64 uZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ if ( exp == 0xFF ) {
+ uiZ =
+ sig ? softfloat_commonNaNToF64UI(
+ softfloat_f32UIToCommonNaN( uiA ) )
+ : packToF64UI( sign, 0x7FF, 0 );
+ goto uiZ;
+ }
+ if ( ! exp ) {
+ if ( ! sig ) {
+ uiZ = packToF64UI( sign, 0, 0 );
+ goto uiZ;
+ }
+ normExpSig = softfloat_normSubnormalF32Sig( sig );
+ /* The -1 compensates for the hidden bit the pack below re-adds --
+    TODO confirm against softfloat_normSubnormalF32Sig's convention. */
+ exp = normExpSig.exp - 1;
+ sig = normExpSig.sig;
+ }
+ /* 0x380 rebases the exponent (0x3FF - 0x7F); <<29 aligns the 23-bit
+    fraction into the 52-bit double fraction field (52 - 23 = 29). */
+ uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) sig<<29 );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f32_to_i32.c b/softfloat/f32_to_i32.c new file mode 100755 index 0000000..bbbaee0 --- /dev/null +++ b/softfloat/f32_to_i32.c @@ -0,0 +1,34 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 32-bit signed integer using
+| `roundingMode'.  If `exact' is true, an inexact conversion raises the
+| inexact flag.  Overflow/NaN handling is delegated to
+| softfloat_roundPackToI32.
+*----------------------------------------------------------------------------*/
+int_fast32_t f32_to_i32( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ uint_fast64_t sig64;
+ int_fast16_t shiftCount;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* Treat NaN as positive so the rounding helper saturates toward the
+    maximum positive value -- TODO confirm against roundPackToI32. */
+ if ( ( exp == 0xFF ) && sig ) sign = 0;
+ /* Restore the hidden bit for normal numbers. */
+ if ( exp ) sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<32;
+ /* 0xAF positions the binary point for the fixed-point rounding input. */
+ shiftCount = 0xAF - exp;
+ if ( 0 < shiftCount ) {
+ sig64 = softfloat_shift64RightJam( sig64, shiftCount );
+ }
+ return softfloat_roundPackToI32( sign, sig64, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f32_to_i32_r_minMag.c b/softfloat/f32_to_i32_r_minMag.c new file mode 100755 index 0000000..63ff1e2 --- /dev/null +++ b/softfloat/f32_to_i32_r_minMag.c @@ -0,0 +1,45 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 32-bit signed integer,
+| rounding toward zero (truncation).  If `exact' is true, discarded fraction
+| bits raise the inexact flag.  Out-of-range values and NaN raise the
+| invalid exception and saturate.
+*----------------------------------------------------------------------------*/
+int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ bool sign;
+ int_fast16_t shiftCount;
+ int_fast32_t absZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* |a| < 1 truncates to zero (inexact if nonzero). */
+ if ( exp < 0x7F ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ sign = signF32UI( uiA );
+ shiftCount = 0x9E - exp;
+ /* Magnitude >= 2^31: only exactly INT32_MIN (-2^31) is representable;
+    anything else is invalid and saturates (NaN to the positive limit). */
+ if ( shiftCount <= 0 ) {
+ if ( uiA != packToF32UI( 1, 0x9E, 0 ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ if ( ! sign || ( ( exp == 0xFF ) && sig ) ) return 0x7FFFFFFF;
+ }
+ return -0x7FFFFFFF - 1;
+ }
+ sig = ( sig | 0x00800000 )<<8;
+ absZ = sig>>shiftCount;
+ /* Any bits shifted out make the truncated result inexact. */
+ if ( exact && (uint32_t) ( sig<<( ( - shiftCount ) & 31 ) ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return sign ? - absZ : absZ;
+
+}
+
diff --git a/softfloat/f32_to_i64.c b/softfloat/f32_to_i64.c new file mode 100755 index 0000000..c0b8981 --- /dev/null +++ b/softfloat/f32_to_i64.c @@ -0,0 +1,44 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 64-bit signed integer using
+| `roundingMode'.  If `exact' is true, an inexact conversion raises the
+| inexact flag.  Values too large in magnitude (and NaN) raise the invalid
+| exception and saturate.
+*----------------------------------------------------------------------------*/
+int_fast64_t f32_to_i64( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftCount;
+ uint_fast64_t sig64, extra;
+ struct uint64_extra sig64Extra;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* shiftCount < 0 means |a| >= 2^63 (or a is NaN/inf): overflow. */
+ shiftCount = 0xBE - exp;
+ if ( shiftCount < 0 ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ /* NaN and positive overflow saturate to INT64_MAX. */
+ if ( ! sign || ( ( exp == 0xFF ) && sig ) ) {
+ return INT64_C( 0x7FFFFFFFFFFFFFFF );
+ }
+ return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+ }
+ if ( exp ) sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<40;
+ extra = 0;
+ /* `extra' accumulates the shifted-out (sticky) bits for rounding. */
+ if ( shiftCount ) {
+ sig64Extra = softfloat_shift64ExtraRightJam( sig64, 0, shiftCount );
+ sig64 = sig64Extra.v;
+ extra = sig64Extra.extra;
+ }
+ return softfloat_roundPackToI64( sign, sig64, extra, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f32_to_i64_r_minMag.c b/softfloat/f32_to_i64_r_minMag.c new file mode 100755 index 0000000..33bff93 --- /dev/null +++ b/softfloat/f32_to_i64_r_minMag.c @@ -0,0 +1,52 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 64-bit signed integer,
+| rounding toward zero (truncation).  If `exact' is true, discarded fraction
+| bits raise the inexact flag.  Out-of-range values and NaN raise the
+| invalid exception and saturate.
+*----------------------------------------------------------------------------*/
+int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ bool sign;
+ int_fast16_t shiftCount;
+ uint_fast64_t sig64;
+ int_fast64_t absZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* |a| < 1 truncates to zero (inexact if nonzero). */
+ if ( exp < 0x7F ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ sign = signF32UI( uiA );
+ shiftCount = 0xBE - exp;
+ /* Magnitude >= 2^63: only exactly INT64_MIN (-2^63) is representable;
+    anything else is invalid and saturates (NaN to the positive limit). */
+ if ( shiftCount <= 0 ) {
+ if ( uiA != packToF32UI( 1, 0xBE, 0 ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ if ( ! sign || ( ( exp == 0xFF ) && sig ) ) {
+ return INT64_C( 0x7FFFFFFFFFFFFFFF );
+ }
+ }
+ return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+ }
+ sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<40;
+ absZ = sig64>>shiftCount;
+ /* If any low-order bits were shifted out, the truncation was inexact. */
+ shiftCount = 40 - shiftCount;
+ if (
+ exact && ( shiftCount < 0 ) && (uint32_t) ( sig<<( shiftCount & 31 ) )
+ ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return sign ? - absZ : absZ;
+
+}
+
diff --git a/softfloat/f32_to_ui32.c b/softfloat/f32_to_ui32.c new file mode 100755 index 0000000..3501db8 --- /dev/null +++ b/softfloat/f32_to_ui32.c @@ -0,0 +1,33 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 32-bit unsigned integer using
+| `roundingMode'.  If `exact' is true, an inexact conversion raises the
+| inexact flag.  Overflow/negative/NaN handling is delegated to
+| softfloat_roundPackToUI32.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ uint_fast64_t sig64;
+ int_fast16_t shiftCount;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* Restore the hidden bit for normal numbers. */
+ if ( exp ) sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<32;
+ /* 0xAF positions the binary point for the fixed-point rounding input. */
+ shiftCount = 0xAF - exp;
+ if ( 0 < shiftCount ) {
+ sig64 = softfloat_shift64RightJam( sig64, shiftCount );
+ }
+ return softfloat_roundPackToUI32( sign, sig64, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f32_to_ui32_r_minMag.c b/softfloat/f32_to_ui32_r_minMag.c new file mode 100755 index 0000000..edd858d --- /dev/null +++ b/softfloat/f32_to_ui32_r_minMag.c @@ -0,0 +1,41 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision value `a' to a 32-bit unsigned integer,
+| rounding toward zero (truncation).  If `exact' is true, discarded fraction
+| bits raise the inexact flag.  Negative nonzero values, values >= 2^32, and
+| NaN raise the invalid exception and return 0xFFFFFFFF.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftCount;
+ uint_fast32_t z;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* |a| < 1 truncates to zero (inexact if nonzero). */
+ if ( exp < 0x7F ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ if ( signF32UI( uiA ) ) goto invalid;
+ shiftCount = 0x9E - exp;
+ if ( shiftCount < 0 ) goto invalid;
+ sig = ( sig | 0x00800000 )<<8;
+ z = sig>>shiftCount;
+ /* Any bits below the truncation point make the result inexact. */
+ if ( exact && ( sig & ( ( (uint_fast32_t) 1<<shiftCount ) - 1 ) ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return z;
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return 0xFFFFFFFF;
+
+}
+
diff --git a/softfloat/f32_to_ui64.c b/softfloat/f32_to_ui64.c new file mode 100755 index 0000000..6cdcf74 --- /dev/null +++ b/softfloat/f32_to_ui64.c @@ -0,0 +1,42 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision float `a' to an unsigned 64-bit integer,
+| rounding per `roundingMode'.  Values too large to represent (including
+| infinities and NaNs) raise the invalid flag and return all-ones.  If
+| `exact' is true, an inexact conversion raises the inexact flag.
+*----------------------------------------------------------------------------*/
+uint_fast64_t f32_to_ui64( float32_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftCount;
+ uint_fast64_t sig64, extra;
+ struct uint64_extra sig64Extra;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF32UI( uiA );
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* 0xBE is the exponent of 2^63; a negative shift means overflow. */
+ shiftCount = 0xBE - exp;
+ if ( shiftCount < 0 ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+ }
+ /* Restore the implicit leading 1 for normal numbers. */
+ if ( exp ) sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<40;
+ extra = 0;
+ /* Shift with an `extra' word so discarded bits survive as rounding
+    information for roundPackToUI64. */
+ if ( shiftCount ) {
+ sig64Extra = softfloat_shift64ExtraRightJam( sig64, 0, shiftCount );
+ sig64 = sig64Extra.v;
+ extra = sig64Extra.extra;
+ }
+ return
+ softfloat_roundPackToUI64( sign, sig64, extra, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f32_to_ui64_r_minMag.c b/softfloat/f32_to_ui64_r_minMag.c new file mode 100755 index 0000000..738d6b1 --- /dev/null +++ b/softfloat/f32_to_ui64_r_minMag.c @@ -0,0 +1,45 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the single-precision float `a' to an unsigned 64-bit integer,
+| rounding toward zero.  Magnitudes below 1 yield 0; negative nonzero
+| values and overflows are invalid and return all-ones.  If `exact' is
+| true, a discarded fraction raises the inexact flag.
+*----------------------------------------------------------------------------*/
+uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact )
+{
+ union ui32_f32 uA;
+ uint_fast32_t uiA;
+ int_fast16_t exp;
+ uint_fast32_t sig;
+ int_fast16_t shiftCount;
+ uint_fast64_t sig64, z;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF32UI( uiA );
+ sig = fracF32UI( uiA );
+ /* |a| < 1 truncates to zero for either sign. */
+ if ( exp < 0x7F ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ if ( signF32UI( uiA ) ) goto invalid;
+ /* 0xBE is the exponent of 2^63; beyond that the result overflows. */
+ shiftCount = 0xBE - exp;
+ if ( shiftCount < 0 ) goto invalid;
+ sig |= 0x00800000;
+ sig64 = (uint_fast64_t) sig<<40;
+ z = sig64>>shiftCount;
+ /* When shiftCount < 40 some fraction bits were dropped; test them via
+    a masked left shift of the 32-bit significand. */
+ shiftCount = 40 - shiftCount;
+ if (
+ exact && ( shiftCount < 0 ) && (uint32_t) ( sig<<( shiftCount & 31 ) )
+ ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return z;
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+
+}
+
diff --git a/softfloat/f64_add.c b/softfloat/f64_add.c new file mode 100755 index 0000000..9ec4b5f --- /dev/null +++ b/softfloat/f64_add.c @@ -0,0 +1,29 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_add( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + float64_t ( *magsRoutine )( uint_fast64_t, uint_fast64_t, bool ); + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + magsRoutine = + ( signA == signB ) ? softfloat_addMagsF64 : softfloat_subMagsF64; + return magsRoutine( uiA, uiB, signA ); + +} + diff --git a/softfloat/f64_div.c b/softfloat/f64_div.c new file mode 100755 index 0000000..9bc72b3 --- /dev/null +++ b/softfloat/f64_div.c @@ -0,0 +1,104 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_div( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint_fast64_t sigZ; + struct uint128 term, rem; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + if ( ! expB ) { + if ( ! 
sigB ) { + if ( ! ( expA | sigA ) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinity ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + expZ = expA - expB + 0x3FD; + sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10; + sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11; + if ( sigB <= ( sigA + sigA ) ) { + ++expZ; + sigA >>= 1; + } + sigZ = softfloat_estimateDiv128To64( sigA, 0, sigB ); + if ( ( sigZ & 0x1FF ) <= 2 ) { + term = softfloat_mul64To128( sigB, sigZ ); + rem = softfloat_sub128( sigA, 0, term.v64, term.v0 ); + while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) { + --sigZ; + rem = softfloat_add128( rem.v64, rem.v0, 0, sigB ); + } + sigZ |= ( rem.v0 != 0 ); + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + infinity: + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f64_eq.c b/softfloat/f64_eq.c new file mode 100755 index 0000000..925aabc --- /dev/null +++ b/softfloat/f64_eq.c @@ -0,0 +1,35 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_eq( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) 
{ + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return + ( uiA == uiB ) || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ); + +} + diff --git a/softfloat/f64_eq_signaling.c b/softfloat/f64_eq_signaling.c new file mode 100755 index 0000000..7a54dc1 --- /dev/null +++ b/softfloat/f64_eq_signaling.c @@ -0,0 +1,30 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_eq_signaling( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return + ( uiA == uiB ) || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ); + +} + diff --git a/softfloat/f64_isSignalingNaN.c b/softfloat/f64_isSignalingNaN.c new file mode 100755 index 0000000..d720ac1 --- /dev/null +++ b/softfloat/f64_isSignalingNaN.c @@ -0,0 +1,16 @@ + +#include <stdbool.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_isSignalingNaN( float64_t a ) +{ + union ui64_f64 uA; + + uA.f = a; + return softfloat_isSigNaNF64UI( uA.ui ); + +} + diff --git a/softfloat/f64_le.c b/softfloat/f64_le.c new file mode 100755 index 0000000..e6c5caf --- /dev/null +++ b/softfloat/f64_le.c @@ -0,0 +1,35 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_le( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + 
softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + ( signA != signB ) + ? signA || ! ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f64_le_quiet.c b/softfloat/f64_le_quiet.c new file mode 100755 index 0000000..e9b7ede --- /dev/null +++ b/softfloat/f64_le_quiet.c @@ -0,0 +1,40 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_le_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + ( signA != signB ) + ? signA || ! 
( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + : ( uiA == uiB ) || ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f64_lt.c b/softfloat/f64_lt.c new file mode 100755 index 0000000..1b2f696 --- /dev/null +++ b/softfloat/f64_lt.c @@ -0,0 +1,35 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_lt( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + ( signA != signB ) + ? signA && ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f64_lt_quiet.c b/softfloat/f64_lt_quiet.c new file mode 100755 index 0000000..f27e6da --- /dev/null +++ b/softfloat/f64_lt_quiet.c @@ -0,0 +1,40 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_lt_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( + ( ( expF64UI( uiA ) == 0x7FF ) && fracF64UI( uiA ) ) + || ( ( expF64UI( uiB ) == 0x7FF ) && fracF64UI( uiB ) ) + ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + ( signA != signB ) + ? 
signA && ( ( uiA | uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + : ( uiA != uiB ) && ( signA ^ ( uiA < uiB ) ); + +} + diff --git a/softfloat/f64_mul.c b/softfloat/f64_mul.c new file mode 100755 index 0000000..4b5dc4e --- /dev/null +++ b/softfloat/f64_mul.c @@ -0,0 +1,91 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the product of the double-precision floats `a' and `b', rounded
+| per the IEEE-754 rules: NaN operands propagate, infinity * 0 is invalid
+| (default NaN), and subnormal inputs are normalized before multiplying.
+*----------------------------------------------------------------------------*/
+float64_t f64_mul( float64_t a, float64_t b )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool signA;
+ int_fast16_t expA;
+ uint_fast64_t sigA;
+ union ui64_f64 uB;
+ uint_fast64_t uiB;
+ bool signB;
+ int_fast16_t expB;
+ uint_fast64_t sigB;
+ bool signZ;
+ uint_fast64_t magBits;
+ struct exp16_sig64 normExpSig;
+ int_fast16_t expZ;
+ struct uint128 sigZ128;
+ uint_fast64_t sigZ, uiZ;
+ union ui64_f64 uZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ signA = signF64UI( uiA );
+ expA = expF64UI( uiA );
+ sigA = fracF64UI( uiA );
+ uB.f = b;
+ uiB = uB.ui;
+ signB = signF64UI( uiB );
+ expB = expF64UI( uiB );
+ sigB = fracF64UI( uiB );
+ signZ = signA ^ signB;
+ /* Special-case infinities and NaNs.  `magBits' records whether the
+    other operand is zero; infinity * 0 is an invalid operation. */
+ if ( expA == 0x7FF ) {
+ if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN;
+ magBits = expB | sigB;
+ goto infArg;
+ }
+ if ( expB == 0x7FF ) {
+ if ( sigB ) goto propagateNaN;
+ magBits = expA | sigA;
+ goto infArg;
+ }
+ /* Normalize subnormal operands so both significands carry an explicit
+    leading 1 below. */
+ if ( ! expA ) {
+ if ( ! sigA ) goto zero;
+ normExpSig = softfloat_normSubnormalF64Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ if ( ! expB ) {
+ if ( ! sigB ) goto zero;
+ normExpSig = softfloat_normSubnormalF64Sig( sigB );
+ expB = normExpSig.exp;
+ sigB = normExpSig.sig;
+ }
+ expZ = expA + expB - 0x3FF;
+ sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;
+ sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11;
+ /* Full 64x64->128 multiply; the low half is collapsed into a sticky
+    bit so rounding still sees any discarded nonzero bits. */
+ sigZ128 = softfloat_mul64To128( sigA, sigB );
+ sigZ = sigZ128.v64 | ( sigZ128.v0 != 0 );
+ if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN:
+ uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ goto uiZ;
+ infArg:
+ if ( ! magBits ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF64UI;
+ } else {
+ uiZ = packToF64UI( signZ, 0x7FF, 0 );
+ }
+ goto uiZ;
+ zero:
+ uiZ = packToF64UI( signZ, 0, 0 );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f64_mulAdd.c b/softfloat/f64_mulAdd.c new file mode 100755 index 0000000..fa1669a --- /dev/null +++ b/softfloat/f64_mulAdd.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Fused multiply-add: returns a*b + c with a single rounding, by unpacking
+| the three operands to their raw bit patterns and delegating to
+| `softfloat_mulAddF64' (the leading 0 argument selects the plain,
+| un-negated FMA variant).
+*----------------------------------------------------------------------------*/
+float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ union ui64_f64 uB;
+ uint_fast64_t uiB;
+ union ui64_f64 uC;
+ uint_fast64_t uiC;
+
+ uA.f = a;
+ uiA = uA.ui;
+ uB.f = b;
+ uiB = uB.ui;
+ uC.f = c;
+ uiC = uC.ui;
+ return softfloat_mulAddF64( 0, uiA, uiB, uiC );
+
+}
+
diff --git a/softfloat/f64_rem.c b/softfloat/f64_rem.c new file mode 100755 index 0000000..08fcd78 --- /dev/null +++ b/softfloat/f64_rem.c @@ -0,0 +1,113 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_rem( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + struct exp16_sig64 normExpSig; + int_fast16_t expDiff; + uint_fast64_t q, alternateSigA; + uint64_t sigMean; + bool signZ; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + if ( expA == 0x7FF ) { + if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + return a; + } + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + expDiff = expA - expB; + sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<11; + sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<11; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + sigA >>= 1; + } + q = ( sigB <= sigA ); + if ( q ) sigA -= sigB; + expDiff -= 64; + while ( 0 < expDiff ) { + q = softfloat_estimateDiv128To64( sigA, 0, sigB ); + q = ( 2 < q ) ? q - 2 : 0; + sigA = - ( ( sigB>>2 ) * q ); + expDiff -= 62; + } + expDiff += 64; + if ( 0 < expDiff ) { + q = softfloat_estimateDiv128To64( sigA, 0, sigB ); + q = ( 2 < q ) ? 
q - 2 : 0; + q >>= 64 - expDiff; + sigB >>= 2; + sigA = ( ( sigA>>1 )<<( expDiff - 1 ) ) - sigB * q; + } else { + sigA >>= 2; + sigB >>= 2; + } + do { + alternateSigA = sigA; + ++q; + sigA -= sigB; + } while ( sigA < UINT64_C( 0x8000000000000000 ) ); + sigMean = sigA + alternateSigA; + if ( + ( UINT64_C( 0x8000000000000000 ) <= sigMean ) + || ( ! sigMean && ( q & 1 ) ) + ) { + sigA = alternateSigA; + } + signZ = ( UINT64_C( 0x8000000000000000 ) <= sigA ); + if ( signZ ) sigA = - sigA; + return softfloat_normRoundPackToF64( signA ^ signZ, expB, sigA ); + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f64_roundToInt.c b/softfloat/f64_roundToInt.c new file mode 100755 index 0000000..ef16dfa --- /dev/null +++ b/softfloat/f64_roundToInt.c @@ -0,0 +1,80 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_roundToInt( float64_t a, int_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t expA; + uint_fast64_t uiZ; + bool signA; + uint_fast64_t lastBitMask, roundBitsMask; + union ui64_f64 uZ; + + uA.f = a; + uiA = uA.ui; + expA = expF64UI( uiA ); + if ( 0x433 <= expA ) { + if ( ( expA == 0x7FF ) && fracF64UI( uiA ) ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + return a; + } + if ( expA <= 0x3FE ) { + if ( ! ( uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + signA = signF64UI( uiA ); + switch ( roundingMode ) { + case softfloat_round_nearest_even: + if ( ( expA == 0x3FE ) && fracF64UI( uiA ) ) { + uiZ = packToF64UI( signA, 0x3FF, 0 ); + goto uiZ; + } + break; + case softfloat_round_min: + uiZ = signA ? 
UINT64_C( 0xBFF0000000000000 ) : 0; + goto uiZ; + case softfloat_round_max: + uiZ = + signA ? UINT64_C( 0x8000000000000000 ) + : UINT64_C( 0x3FF0000000000000 ); + goto uiZ; + case softfloat_round_nearest_maxMag: + if ( expA == 0x3FE ) { + uiZ = packToF64UI( signA, 0x3FF, 0 ); + goto uiZ; + } + break; + } + uiZ = packToF64UI( signA, 0, 0 ); + goto uiZ; + } + lastBitMask = (uint_fast64_t) 1<<( 0x433 - expA ); + roundBitsMask = lastBitMask - 1; + uiZ = uiA; + if ( roundingMode == softfloat_round_nearest_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_nearest_even ) { + uiZ += lastBitMask>>1; + if ( ! ( uiZ & roundBitsMask ) ) uiZ &= ~ lastBitMask; + } else if ( roundingMode != softfloat_round_minMag ) { + if ( signF64UI( uiZ ) ^ ( roundingMode == softfloat_round_max ) ) { + uiZ += roundBitsMask; + } + } + uiZ &= ~ roundBitsMask; + if ( exact && ( uiZ != uiA ) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/f64_sqrt.c b/softfloat/f64_sqrt.c new file mode 100755 index 0000000..cd91010 --- /dev/null +++ b/softfloat/f64_sqrt.c @@ -0,0 +1,74 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the square root of the double-precision float `a'.  NaNs
+| propagate; sqrt of a negative number (other than -0) is invalid and
+| yields the default NaN; +inf, +/-0 return themselves unchanged.
+*----------------------------------------------------------------------------*/
+float64_t f64_sqrt( float64_t a )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool signA;
+ int_fast16_t expA;
+ uint_fast64_t sigA, uiZ;
+ struct exp16_sig64 normExpSig;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ32;
+ uint_fast64_t sigZ;
+ struct uint128 term, rem;
+ union ui64_f64 uZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ signA = signF64UI( uiA );
+ expA = expF64UI( uiA );
+ sigA = fracF64UI( uiA );
+ if ( expA == 0x7FF ) {
+ if ( sigA ) {
+ uiZ = softfloat_propagateNaNF64UI( uiA, 0 );
+ goto uiZ;
+ }
+ /* sqrt(+inf) = +inf; sqrt(-inf) is invalid. */
+ if ( ! signA ) return a;
+ goto invalid;
+ }
+ if ( signA ) {
+ /* sqrt(-0) = -0; any other negative input is invalid. */
+ if ( ! ( expA | sigA ) ) return a;
+ goto invalid;
+ }
+ if ( ! expA ) {
+ if ( ! sigA ) return a;
+ normExpSig = softfloat_normSubnormalF64Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ /* Result exponent is half the unbiased input exponent, rebiased. */
+ expZ = ( ( expA - 0x3FF )>>1 ) + 0x3FE;
+ sigA |= UINT64_C( 0x0010000000000000 );
+ /* 32-bit initial estimate, refined to 64 bits via one Newton-style
+    division step; alignment depends on the exponent's parity. */
+ sigZ32 = softfloat_estimateSqrt32( expA, sigA>>21 );
+ sigA <<= 9 - ( expA & 1 );
+ sigZ =
+ softfloat_estimateDiv128To64( sigA, 0, (uint_fast64_t) sigZ32<<32 )
+ + ( (uint_fast64_t) sigZ32<<30 );
+ /* If the estimate is near a rounding boundary, correct it exactly by
+    checking sigA - sigZ^2 and decrementing until the remainder is
+    non-negative; a nonzero remainder sets the sticky bit. */
+ if ( ( sigZ & 0x1FF ) <= 5 ) {
+ term = softfloat_mul64To128( sigZ, sigZ );
+ rem = softfloat_sub128( sigA, 0, term.v64, term.v0 );
+ while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) {
+ --sigZ;
+ rem =
+ softfloat_add128(
+ rem.v64, rem.v0, sigZ>>63, (uint64_t) ( sigZ<<1 ) );
+ }
+ sigZ |= ( ( rem.v64 | rem.v0 ) != 0 );
+ }
+ return softfloat_roundPackToF64( 0, expZ, sigZ );
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF64UI;
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f64_sub.c b/softfloat/f64_sub.c new file mode 100755 index 0000000..38bd574 --- /dev/null +++ b/softfloat/f64_sub.c @@ -0,0 +1,29 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_sub( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + float64_t ( *magsRoutine )( uint_fast64_t, uint_fast64_t, bool ); + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + magsRoutine = + ( signA == signB ) ? softfloat_subMagsF64 : softfloat_addMagsF64; + return magsRoutine( uiA, uiB, signA ); + +} + diff --git a/softfloat/f64_to_f32.c b/softfloat/f64_to_f32.c new file mode 100755 index 0000000..395d6c6 --- /dev/null +++ b/softfloat/f64_to_f32.c @@ -0,0 +1,43 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to single precision.  NaNs are
+| converted through the common-NaN representation (signaling NaNs raise
+| the invalid flag there); infinities and zeros map directly; everything
+| else is narrowed and rounded by `softfloat_roundPackToF32'.
+*----------------------------------------------------------------------------*/
+float32_t f64_to_f32( float64_t a )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ uint_fast32_t uiZ, sig32;
+ union ui32_f32 uZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ if ( exp == 0x7FF ) {
+ uiZ =
+ sig ? softfloat_commonNaNToF32UI(
+ softfloat_f64UIToCommonNaN( uiA ) )
+ : packToF32UI( sign, 0xFF, 0 );
+ goto uiZ;
+ }
+ /* Narrow the 52-bit fraction to fit single precision, jamming the
+    discarded bits into the low bit so rounding still sees them. */
+ sig32 = softfloat_shortShift64RightJam( sig, 22 );
+ if ( ! ( exp | sig32 ) ) {
+ uiZ = packToF32UI( sign, 0, 0 );
+ goto uiZ;
+ }
+ /* 0x381 rebias: double bias 0x3FF minus single bias 0x7F, adjusted
+    for the explicit leading bit placed at 0x40000000. */
+ return softfloat_roundPackToF32( sign, exp - 0x381, sig32 | 0x40000000 );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/f64_to_i32.c b/softfloat/f64_to_i32.c new file mode 100755 index 0000000..0778a86 --- /dev/null +++ b/softfloat/f64_to_i32.c @@ -0,0 +1,30 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to a signed 32-bit integer,
+| rounding per `roundingMode'.  If `exact' is true, an inexact conversion
+| raises the inexact flag.  Overflow and NaN handling is delegated to
+| `softfloat_roundPackToI32'.
+*----------------------------------------------------------------------------*/
+int_fast32_t f64_to_i32( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ /* Force NaN inputs to be treated as positive so roundPackToI32 returns
+    the positive saturation value for them. */
+ if ( ( exp == 0x7FF ) && sig ) sign = 0;
+ if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+ shiftCount = 0x42C - exp;
+ if ( 0 < shiftCount ) sig = softfloat_shift64RightJam( sig, shiftCount );
+ return softfloat_roundPackToI32( sign, sig, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f64_to_i32_r_minMag.c b/softfloat/f64_to_i32_r_minMag.c new file mode 100755 index 0000000..39246c2 --- /dev/null +++ b/softfloat/f64_to_i32_r_minMag.c @@ -0,0 +1,50 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to a signed 32-bit integer,
+| rounding toward zero.  |a| < 1 yields 0; overflow or NaN raises invalid
+| and saturates (INT32_MIN for negative, INT32_MAX otherwise).  If `exact'
+| is true, a discarded fraction raises the inexact flag.
+*----------------------------------------------------------------------------*/
+int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ bool sign;
+ int_fast16_t shiftCount;
+ uint_fast32_t absZ;
+ union { uint32_t ui; int32_t i; } uZ;
+ int_fast32_t z;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ /* exp < 0x3FF means |a| < 1, which truncates to 0. */
+ if ( exp < 0x3FF ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ sign = signF64UI( uiA );
+ /* exp > 0x41E means |a| >= 2^31: guaranteed overflow (NaNs are forced
+    to the positive saturation value). */
+ if ( 0x41E < exp ) {
+ if ( ( exp == 0x7FF ) && sig ) sign = 0;
+ goto invalid;
+ }
+ sig |= UINT64_C( 0x0010000000000000 );
+ shiftCount = 0x433 - exp;
+ absZ = sig>>shiftCount;
+ uZ.ui = sign ? - absZ : absZ;
+ z = uZ.i;
+ /* Sign mismatch after the cast back means the magnitude overflowed
+    the 32-bit signed range (e.g. +2^31). */
+ if ( ( z < 0 ) != sign ) goto invalid;
+ if ( exact && ( (uint_fast64_t) absZ<<shiftCount != sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return z;
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return sign ? -0x7FFFFFFF - 1 : 0x7FFFFFFF;
+
+}
+
diff --git a/softfloat/f64_to_i64.c b/softfloat/f64_to_i64.c new file mode 100755 index 0000000..89663ee --- /dev/null +++ b/softfloat/f64_to_i64.c @@ -0,0 +1,46 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to a signed 64-bit integer,
+| rounding per `roundingMode'.  Overflow raises the invalid flag and
+| saturates (NaNs saturate positive).  If `exact' is true, an inexact
+| conversion raises the inexact flag.
+*----------------------------------------------------------------------------*/
+int_fast64_t f64_to_i64( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+ struct uint64_extra sigExtra;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+ shiftCount = 0x433 - exp;
+ if ( shiftCount <= 0 ) {
+ /* exp > 0x43E means |a| >= 2^63: certain overflow.  -2^63 itself is
+    the one representable negative extreme and is excluded below. */
+ if ( 0x43E < exp ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return
+ ! sign
+ || ( ( exp == 0x7FF )
+ && ( sig != UINT64_C( 0x0010000000000000 ) ) )
+ ? INT64_C( 0x7FFFFFFFFFFFFFFF )
+ : - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+ }
+ sigExtra.v = sig<<( - shiftCount );
+ sigExtra.extra = 0;
+ } else {
+ /* Fractional bits are kept in `extra' so rounding sees them. */
+ sigExtra = softfloat_shift64ExtraRightJam( sig, 0, shiftCount );
+ }
+ return
+ softfloat_roundPackToI64(
+ sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f64_to_i64_r_minMag.c b/softfloat/f64_to_i64_r_minMag.c new file mode 100755 index 0000000..525705b --- /dev/null +++ b/softfloat/f64_to_i64_r_minMag.c @@ -0,0 +1,52 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to a signed 64-bit integer,
+| rounding toward zero.  |a| < 1 yields 0; overflow raises invalid and
+| saturates, except that exactly -2^63 converts without a flag.  If
+| `exact' is true, a discarded fraction raises the inexact flag.
+*----------------------------------------------------------------------------*/
+int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+ int_fast64_t absZ;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ shiftCount = exp - 0x433;
+ if ( 0 <= shiftCount ) {
+ /* |a| >= 2^52 with exp >= 0x43E means |a| >= 2^63.  The single
+    in-range value -2^63 (packToF64UI(1,0x43E,0)) falls through to
+    the INT64_MIN return without raising invalid. */
+ if ( 0x43E <= exp ) {
+ if ( uiA != packToF64UI( 1, 0x43E, 0 ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ if ( ! sign || ( ( exp == 0x7FF ) && sig ) ) {
+ return INT64_C( 0x7FFFFFFFFFFFFFFF );
+ }
+ }
+ return - INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+ }
+ sig |= UINT64_C( 0x0010000000000000 );
+ absZ = sig<<shiftCount;
+ } else {
+ if ( exp < 0x3FF ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ sig |= UINT64_C( 0x0010000000000000 );
+ absZ = sig>>( - shiftCount );
+ /* Nonzero bits below the truncation point mean an inexact result. */
+ if ( exact && (uint64_t) ( sig<<( shiftCount & 63 ) ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ }
+ return sign ? - absZ : absZ;
+
+}
+
diff --git a/softfloat/f64_to_ui32.c b/softfloat/f64_to_ui32.c new file mode 100755 index 0000000..b186605 --- /dev/null +++ b/softfloat/f64_to_ui32.c @@ -0,0 +1,29 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to an unsigned 32-bit integer,
+| rounding per `roundingMode'.  If `exact' is true, an inexact conversion
+| raises the inexact flag.  Overflow/NaN handling is delegated to
+| `softfloat_roundPackToUI32'.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f64_to_ui32( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ /* Restore the implicit leading 1 for normal numbers. */
+ if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+ /* Right-align with jamming so discarded bits feed the rounding step. */
+ shiftCount = 0x42C - exp;
+ if ( 0 < shiftCount ) sig = softfloat_shift64RightJam( sig, shiftCount );
+ return softfloat_roundPackToUI32( sign, sig, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f64_to_ui32_r_minMag.c b/softfloat/f64_to_ui32_r_minMag.c new file mode 100755 index 0000000..9f1dd4d --- /dev/null +++ b/softfloat/f64_to_ui32_r_minMag.c @@ -0,0 +1,40 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to an unsigned 32-bit integer,
+| rounding toward zero.  |a| < 1 yields 0; negative nonzero values and
+| overflows raise invalid and return 0xFFFFFFFF.  If `exact' is true, a
+| discarded fraction raises the inexact flag.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+ uint_fast32_t z;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ /* exp < 0x3FF means |a| < 1, which truncates to 0. */
+ if ( exp < 0x3FF ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ /* exp > 0x41E means |a| >= 2^32; negative values can never fit. */
+ if ( signF64UI( uiA ) || ( 0x41E < exp ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return 0xFFFFFFFF;
+ }
+ sig |= UINT64_C( 0x0010000000000000 );
+ shiftCount = 0x433 - exp;
+ z = sig>>shiftCount;
+ /* Reconstructing the shifted value detects any truncated bits. */
+ if ( exact && ( (uint_fast64_t) z<<shiftCount != sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return z;
+
+}
+
diff --git a/softfloat/f64_to_ui64.c b/softfloat/f64_to_ui64.c new file mode 100755 index 0000000..9afebd7 --- /dev/null +++ b/softfloat/f64_to_ui64.c @@ -0,0 +1,41 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to an unsigned 64-bit integer,
+| rounding per `roundingMode'.  Values with exponent above 0x43E (>= 2^64)
+| raise invalid and return all-ones.  If `exact' is true, an inexact
+| conversion raises the inexact flag.
+*----------------------------------------------------------------------------*/
+uint_fast64_t f64_to_ui64( float64_t a, int_fast8_t roundingMode, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ bool sign;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+ struct uint64_extra sigExtra;
+
+ uA.f = a;
+ uiA = uA.ui;
+ sign = signF64UI( uiA );
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+ shiftCount = 0x433 - exp;
+ if ( shiftCount <= 0 ) {
+ if ( 0x43E < exp ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+ }
+ sigExtra.v = sig<<( - shiftCount );
+ sigExtra.extra = 0;
+ } else {
+ /* Fractional bits land in `extra' so rounding can see them. */
+ sigExtra = softfloat_shift64ExtraRightJam( sig, 0, shiftCount );
+ }
+ return
+ softfloat_roundPackToUI64(
+ sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+
+}
+
diff --git a/softfloat/f64_to_ui64_r_minMag.c b/softfloat/f64_to_ui64_r_minMag.c new file mode 100755 index 0000000..a66d3ff --- /dev/null +++ b/softfloat/f64_to_ui64_r_minMag.c @@ -0,0 +1,45 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the double-precision float `a' to an unsigned 64-bit integer,
+| rounding toward zero.  |a| < 1 yields 0; negative nonzero values and
+| overflows raise invalid and return all-ones.  If `exact' is true, a
+| discarded fraction raises the inexact flag.
+*----------------------------------------------------------------------------*/
+uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact )
+{
+ union ui64_f64 uA;
+ uint_fast64_t uiA;
+ int_fast16_t exp;
+ uint_fast64_t sig;
+ int_fast16_t shiftCount;
+ uint_fast64_t z;
+
+ uA.f = a;
+ uiA = uA.ui;
+ exp = expF64UI( uiA );
+ sig = fracF64UI( uiA );
+ /* exp < 0x3FF means |a| < 1, which truncates to 0. */
+ if ( exp < 0x3FF ) {
+ if ( exact && ( exp | sig ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ return 0;
+ }
+ if ( signF64UI( uiA ) ) goto invalid;
+ shiftCount = exp - 0x433;
+ if ( 0 <= shiftCount ) {
+ /* exp > 0x43E means |a| >= 2^64: overflow. */
+ if ( 0x43E < exp ) goto invalid;
+ z = ( sig | UINT64_C( 0x0010000000000000 ) )<<shiftCount;
+ } else {
+ sig |= UINT64_C( 0x0010000000000000 );
+ z = sig>>( - shiftCount );
+ /* Nonzero bits below the truncation point mean an inexact result. */
+ if ( exact && (uint64_t) ( sig<<( shiftCount & 63 ) ) ) {
+ softfloat_exceptionFlags |= softfloat_flag_inexact;
+ }
+ }
+ return z;
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ return UINT64_C( 0xFFFFFFFFFFFFFFFF );
+
+}
+
diff --git a/softfloat/i32_to_f32.c b/softfloat/i32_to_f32.c new file mode 100755 index 0000000..f51facd --- /dev/null +++ b/softfloat/i32_to_f32.c @@ -0,0 +1,21 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 32-bit signed integer `a' to single-precision floating-point.
+| Inputs with more than 24 significant bits are rounded per the current
+| rounding mode.
+*----------------------------------------------------------------------------*/
+float32_t i32_to_f32( int_fast32_t a )
+{
+    bool isNeg;
+    union ui32_f32 u;
+
+    isNeg = ( a < 0 );
+    /* Zero and -2^31 have no bits set below bit 31; both convert exactly,
+       and handling -2^31 here avoids negating it below. */
+    if ( ! ( a & 0x7FFFFFFF ) ) {
+        if ( isNeg ) {
+            u.ui = packToF32UI( 1, 0x9E, 0 );
+        } else {
+            u.ui = 0;
+        }
+        return u.f;
+    }
+    if ( isNeg ) {
+        return softfloat_normRoundPackToF32( true, 0x9C, - a );
+    }
+    return softfloat_normRoundPackToF32( false, 0x9C, a );
+
+}
+
diff --git a/softfloat/i32_to_f64.c b/softfloat/i32_to_f64.c new file mode 100755 index 0000000..d42cbe8 --- /dev/null +++ b/softfloat/i32_to_f64.c @@ -0,0 +1,31 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 32-bit signed integer `a' to double-precision floating-point.
+| The conversion is always exact: the 53-bit double significand holds any
+| 32-bit integer.
+*----------------------------------------------------------------------------*/
+float64_t i32_to_f64( int_fast32_t a )
+{
+    uint_fast64_t uiZ;
+    bool sign;
+    uint_fast32_t absA;
+    int shiftCount;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        sign = ( a < 0 );
+        /* Negate through the unsigned type so that a == INT32_MIN does not
+           overflow the signed type (undefined behavior); modular negation
+           yields |a| for every 32-bit input. */
+        absA = sign ? - (uint_fast32_t) a : (uint_fast32_t) a;
+        /* Left-justify the magnitude so its leading 1 lands on bit 52
+           (the implicit bit of the double format). */
+        shiftCount = softfloat_countLeadingZeros32( absA ) + 21;
+        uiZ =
+            packToF64UI(
+                sign, 0x432 - shiftCount, (uint_fast64_t) absA<<shiftCount );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/softfloat/i64_to_f32.c b/softfloat/i64_to_f32.c new file mode 100755 index 0000000..4fecbb9 --- /dev/null +++ b/softfloat/i64_to_f32.c @@ -0,0 +1,36 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 64-bit signed integer `a' to single-precision floating-point.
+| Inputs with more than 24 significant bits are rounded per the current
+| rounding mode.
+*----------------------------------------------------------------------------*/
+float32_t i64_to_f32( int_fast64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int shiftCount;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    sign = ( a < 0 );
+    /* Negate through the unsigned type so INT64_MIN does not overflow. */
+    absA = sign ? - (uint_fast64_t) a : a;
+    shiftCount = softfloat_countLeadingZeros64( absA ) - 40;
+    /* 0 <= shiftCount means absA fits in 24 bits: pack exactly, with zero
+       as a special case (its exponent field must be all zero). */
+    if ( 0 <= shiftCount ) {
+        u.ui =
+            a ? packToF32UI(
+                    sign, 0x95 - shiftCount, (uint_fast32_t) absA<<shiftCount )
+                : 0;
+        return u.f;
+    } else {
+        /* Too many significant bits: position the value with 7 extra
+           low-order rounding bits, jamming any bits shifted off, then
+           round and pack. */
+        shiftCount += 7;
+        sig =
+            ( shiftCount < 0 )
+                ? softfloat_shortShift64RightJam( absA, - shiftCount )
+                : (uint_fast32_t) absA<<shiftCount;
+        return softfloat_roundPackToF32( sign, 0x9C - shiftCount, sig );
+    }
+
+}
+
diff --git a/softfloat/i64_to_f64.c b/softfloat/i64_to_f64.c new file mode 100755 index 0000000..1add960 --- /dev/null +++ b/softfloat/i64_to_f64.c @@ -0,0 +1,21 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 64-bit signed integer `a' to double-precision floating-point.
+| Inputs with more than 53 significant bits are rounded per the current
+| rounding mode.
+*----------------------------------------------------------------------------*/
+float64_t i64_to_f64( int_fast64_t a )
+{
+    union ui64_f64 u;
+    bool isNeg = ( a < 0 );
+
+    /* Zero and -2^63 have no bits set below bit 63; both convert exactly,
+       and handling -2^63 here avoids negating it below. */
+    if ( ! ( a & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) ) {
+        u.ui = isNeg ? packToF64UI( 1, 0x43E, 0 ) : 0;
+        return u.f;
+    }
+    if ( isNeg ) {
+        return softfloat_normRoundPackToF64( true, 0x43C, - a );
+    }
+    return softfloat_normRoundPackToF64( false, 0x43C, a );
+
+}
+
diff --git a/softfloat/internals.h b/softfloat/internals.h new file mode 100755 index 0000000..5e6fd76 --- /dev/null +++ b/softfloat/internals.h @@ -0,0 +1,232 @@ +
+/*** UPDATE COMMENTS. ***/
+
+#include "softfloat_types.h"
+
+/*----------------------------------------------------------------------------
+| Unions for reinterpreting the bits of a floating-point value as an
+| unsigned integer of the same width, and vice versa.  The 128-bit variant
+| orders its halves by host endianness.
+*----------------------------------------------------------------------------*/
+union ui32_f32 { uint32_t ui; float32_t f; };
+union ui64_f64 { uint64_t ui; float64_t f; };
+#ifdef LITTLEENDIAN
+union ui128_f128 { uint64_t ui0, ui64; float128_t f; };
+#else
+union ui128_f128 { uint64_t ui64, ui0; float128_t f; };
+#endif
+
+/*----------------------------------------------------------------------------
+| Operation flags for `softfloat_mulAddF32'/`softfloat_mulAddF64' —
+| presumably selecting negation of the addend (`subC') or of the product
+| (`subProd'); confirm against the mulAdd implementations.
+*----------------------------------------------------------------------------*/
+enum {
+    softfloat_mulAdd_subC    = 1,
+    softfloat_mulAdd_subProd = 2
+};
+
+uint_fast32_t
+ softfloat_roundPackToUI32( bool, uint_fast64_t, int_fast8_t, bool );
+uint_fast64_t
+ softfloat_roundPackToUI64(
+ bool, uint_fast64_t, uint_fast64_t, int_fast8_t, bool );
+/*----------------------------------------------------------------------------
+| Takes a 64-bit fixed-point value `absZ' with binary point between bits 6
+| and 7, and returns the properly rounded 32-bit integer corresponding to the
+| input. If `zSign' is 1, the input is negated before being converted to an
+| integer. Bit 63 of `absZ' must be zero. Ordinarily, the fixed-point input
+| is simply rounded to an integer, with the inexact exception raised if the
+| input cannot be represented exactly as an integer. However, if the fixed-
+| point input is too large, the invalid exception is raised and the largest
+| positive or negative integer is returned.
+*----------------------------------------------------------------------------*/
+int_fast32_t
+ softfloat_roundPackToI32( bool, uint_fast64_t, int_fast8_t, bool );
+/*----------------------------------------------------------------------------
+| Takes the 128-bit fixed-point value formed by concatenating `absZ0' and
+| `absZ1', with binary point between bits 63 and 64 (between the input words),
+| and returns the properly rounded 64-bit integer corresponding to the input.
+| If `zSign' is 1, the input is negated before being converted to an integer.
+| Ordinarily, the fixed-point input is simply rounded to an integer, with
+| the inexact exception raised if the input cannot be represented exactly as
+| an integer. However, if the fixed-point input is too large, the invalid
+| exception is raised and the largest positive or negative integer is
+| returned.
+*----------------------------------------------------------------------------*/
+int_fast64_t
+ softfloat_roundPackToI64(
+ bool, uint_fast64_t, uint_fast64_t, int_fast8_t, bool );
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#define isNaNF32UI( ui ) (0xFF000000<(uint32_t)((uint_fast32_t)(ui)<<1))
+/*----------------------------------------------------------------------------
+| Returns the sign bit of the single-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define signF32UI( a ) ((bool)((uint32_t)(a)>>31))
+/*----------------------------------------------------------------------------
+| Returns the exponent bits of the single-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define expF32UI( a ) ((int_fast16_t)((a)>>23)&0xFF)
+/*----------------------------------------------------------------------------
+| Returns the fraction bits of the single-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define fracF32UI( a ) ((a)&0x007FFFFF)
+/*----------------------------------------------------------------------------
+| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
+| single-precision floating-point value, returning the result. After being
+| shifted into the proper positions, the three fields are simply added
+| together to form the result. This means that any integer portion of `zSig'
+| will be added into the exponent. Since a properly normalized significand
+| will have an integer portion equal to 1, the `zExp' input should be 1 less
+| than the desired result exponent whenever `zSig' is a complete, normalized
+| significand.
+*----------------------------------------------------------------------------*/
+#define packToF32UI( sign, exp, sig ) (((uint32_t)(sign)<<31)+((uint32_t)(exp)<<23)+(sig))
+
+/*----------------------------------------------------------------------------
+| Normalizes the subnormal single-precision floating-point value represented
+| by the denormalized significand `aSig'.  The normalized exponent and
+| significand are returned in the `exp' and `sig' fields of the result
+| structure.
+*----------------------------------------------------------------------------*/
+struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; };
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper single-precision floating-
+| point value corresponding to the abstract input. Ordinarily, the abstract
+| value is simply rounded and packed into the single-precision format, with
+| the inexact exception raised if the abstract input cannot be represented
+| exactly. However, if the abstract value is too large, the overflow and
+| inexact exceptions are raised and an infinity or maximal finite value is
+| returned. If the abstract value is too small, the input value is rounded to
+| a subnormal number, and the underflow and inexact exceptions are raised if
+| the abstract input cannot be represented exactly as a subnormal single-
+| precision floating-point number.
+| The input significand `zSig' has its binary point between bits 30
+| and 29, which is 7 bits to the left of the usual location. This shifted
+| significand must be normalized or smaller. If `zSig' is not normalized,
+| `zExp' must be 0; in that case, the result returned is a subnormal number,
+| and it must not require rounding. In the usual case that `zSig' is
+| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
+| The handling of underflow and overflow follows the IEC/IEEE Standard for
+| Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t );
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper single-precision floating-
+| point value corresponding to the abstract input. This routine is just like
+| `roundAndPackFloat32' except that `zSig' does not have to be normalized.
+| Bit 31 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
+| floating-point exponent.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| Returns the result of adding the absolute values of the single-precision
+| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated
+| before being returned. `zSign' is ignored if the result is a NaN.
+| The addition is performed according to the IEC/IEEE Standard for Binary
+| Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t, bool );
+/*----------------------------------------------------------------------------
+| Returns the result of subtracting the absolute values of the single-
+| precision floating-point values `a' and `b'. If `zSign' is 1, the
+| difference is negated before being returned. `zSign' is ignored if the
+| result is a NaN. The subtraction is performed according to the IEC/IEEE
+| Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t, bool );
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_mulAddF32( int, uint_fast32_t, uint_fast32_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the double-precision floating-point value `a' is a NaN;
+| otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#define isNaNF64UI( ui ) (UINT64_C(0xFFE0000000000000)<(uint64_t)((uint_fast64_t)(ui)<<1))
+/*----------------------------------------------------------------------------
+| Returns the sign bit of the double-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define signF64UI( a ) ((bool)((uint64_t)(a)>>63))
+/*----------------------------------------------------------------------------
+| Returns the exponent bits of the double-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define expF64UI( a ) ((int_fast16_t)((a)>>52)&0x7FF)
+/*----------------------------------------------------------------------------
+| Returns the fraction bits of the double-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+#define fracF64UI( a ) ((a)&UINT64_C(0x000FFFFFFFFFFFFF))
+/*----------------------------------------------------------------------------
+| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
+| double-precision floating-point value, returning the result. After being
+| shifted into the proper positions, the three fields are simply added
+| together to form the result. This means that any integer portion of `zSig'
+| will be added into the exponent. Since a properly normalized significand
+| will have an integer portion equal to 1, the `zExp' input should be 1 less
+| than the desired result exponent whenever `zSig' is a complete, normalized
+| significand.
+*----------------------------------------------------------------------------*/
+#define packToF64UI( sign, exp, sig ) (((uint64_t)(sign)<<63)+((uint64_t)(exp)<<52)+(sig))
+
+/*----------------------------------------------------------------------------
+| Normalizes the subnormal double-precision floating-point value represented
+| by the denormalized significand `aSig'.  The normalized exponent and
+| significand are returned in the `exp' and `sig' fields of the result
+| structure.
+*----------------------------------------------------------------------------*/
+struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; };
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t );
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper double-precision floating-
+| point value corresponding to the abstract input. Ordinarily, the abstract
+| value is simply rounded and packed into the double-precision format, with
+| the inexact exception raised if the abstract input cannot be represented
+| exactly. However, if the abstract value is too large, the overflow and
+| inexact exceptions are raised and an infinity or maximal finite value is
+| returned. If the abstract value is too small, the input value is rounded
+| to a subnormal number, and the underflow and inexact exceptions are raised
+| if the abstract input cannot be represented exactly as a subnormal double-
+| precision floating-point number.
+| The input significand `zSig' has its binary point between bits 62
+| and 61, which is 10 bits to the left of the usual location. This shifted
+| significand must be normalized or smaller. If `zSig' is not normalized,
+| `zExp' must be 0; in that case, the result returned is a subnormal number,
+| and it must not require rounding. In the usual case that `zSig' is
+| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
+| The handling of underflow and overflow follows the IEC/IEEE Standard for
+| Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t );
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand `zSig', and returns the proper double-precision floating-
+| point value corresponding to the abstract input. This routine is just like
+| `softfloat_roundPackToF64' except that `zSig' does not have to be
+| normalized.
+| Bit 63 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
+| floating-point exponent.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t );
+
+/*----------------------------------------------------------------------------
+| Returns the result of adding the absolute values of the double-precision
+| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated
+| before being returned. `zSign' is ignored if the result is a NaN.
+| The addition is performed according to the IEC/IEEE Standard for Binary
+| Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool );
+/*----------------------------------------------------------------------------
+| Returns the result of subtracting the absolute values of the double-
+| precision floating-point values `a' and `b'. If `zSign' is 1, the
+| difference is negated before being returned. `zSign' is ignored if the
+| result is a NaN. The subtraction is performed according to the IEC/IEEE
+| Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool );
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_mulAddF64( int, uint_fast64_t, uint_fast64_t, uint_fast64_t );
+
diff --git a/softfloat/primitives.h b/softfloat/primitives.h new file mode 100755 index 0000000..71038ea --- /dev/null +++ b/softfloat/primitives.h @@ -0,0 +1,628 @@ + +/*============================================================================ + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 3. + +*** UPDATE + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. 
+ +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> + +/*** CHANGE TO USE `fast' INTEGER TYPES? ***/ +/*** ADD 80-BIT FUNCTIONS? ***/ + +#ifdef LITTLEENDIAN +struct uintx80 { uint64_t v0; uint16_t v64; }; +struct uint128 { uint64_t v0, v64; }; +struct uint192 { uint64_t v0, v64, v128; }; +struct uint256 { uint64_t v0, v64, v128, v192; }; +#else +struct uintx80 { uint16_t v64; uint64_t v0; }; +struct uint128 { uint64_t v64, v0; }; +struct uint192 { uint64_t v128, v64, v0; }; +struct uint256 { uint64_t v256, v128, v64, v0; }; +#endif + +struct uint64_extra { uint64_t v, extra; }; +struct uint128_extra { uint64_t v64; uint64_t v0; uint64_t extra; }; + + +/*** SHIFT COUNTS CANNOT BE ZERO. MUST CHECK BEFORE CALLING! ***/ + + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' +| is equal to the 128-bit value formed by concatenating `b0' and `b1'. +| Otherwise, returns 0. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL ) +INLINE bool + softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return ( a64 == b64 ) && ( a0 == b0 ); } +#else +bool softfloat_eq128( uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +| than or equal to the 128-bit value formed by concatenating `b0' and `b1'. +| Otherwise, returns 0. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL ) +INLINE bool + softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return ( a64 < b64 ) || ( ( a64 == b64 ) && ( a0 <= b0 ) ); } +#else +bool softfloat_le128( uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +| than the 128-bit value formed by concatenating `b0' and `b1'. Otherwise, +| returns 0. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL ) +INLINE bool + softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return ( a64 < b64 ) || ( ( a64 == b64 ) && ( a0 < b0 ) ); } +#else +bool softfloat_lt128( uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the +| number of bits given in `count'. Any bits shifted off are lost. The value +| of `count' must be less than 64. The result is broken into two 64-bit +| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE struct uint128 + softfloat_shortShift128Left( uint64_t a64, uint64_t a0, unsigned int count ) +{ + struct uint128 z; + z.v64 = a64<<count | a0>>( ( - count ) & 63 ); + z.v0 = a0<<count; + return z; +} +#else +struct uint128 softfloat_shortShift128Left( uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left +| by the number of bits given in `count'. Any bits shifted off are lost. +| The value of `count' must be less than 64. The result is broken into three +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr', +| `z1Ptr', and `z2Ptr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE struct uint192 + softfloat_shortShift192Left( + uint64_t a128, uint64_t a64, uint64_t a0, unsigned int count ) +{ + unsigned int negCount = - count; + struct uint192 z; + z.v128 = a128<<count | a64>>( negCount & 63 ); + z.v64 = a64<<count | a0>>( negCount & 63 ); + z.v0 = a0<<count; + return z; +} +#else +struct uint192 + softfloat_shortShift192Left( uint64_t, uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts `a' right by the number of bits given in `count'. If any nonzero +| bits are shifted off, they are ``jammed'' into the least significant bit of +| the result by setting the least significant bit to 1. The value of `count' +| can be arbitrarily large; in particular, if `count' is greater than 32, the +| result will be either 0 or 1, depending on whether `a' is zero or nonzero. +| The result is stored in the location pointed to by `zPtr'. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE uint32_t softfloat_shift32RightJam( uint32_t a, unsigned int count ) +{ + return + ( count < 32 ) + ? a>>count | ( (uint32_t) ( a<<( ( - count ) & 31 ) ) != 0 ) + : ( a != 0 ); +} +#else +uint32_t softfloat_shift32RightJam( uint32_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shift count is less than 32. +*----------------------------------------------------------------------------*/ +#if defined INLINE +INLINE uint32_t softfloat_shortShift32Right1Jam( uint32_t a ) + { return a>>1 | ( a & 1 ); } +#else +uint32_t softfloat_shortShift32Right1Jam( uint32_t ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts `a' right by the number of bits given in `count'. If any nonzero +| bits are shifted off, they are ``jammed'' into the least significant bit of +| the result by setting the least significant bit to 1. The value of `count' +| can be arbitrarily large; in particular, if `count' is greater than 64, the +| result will be either 0 or 1, depending on whether `a' is zero or nonzero. +| The result is stored in the location pointed to by `zPtr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE uint64_t softfloat_shift64RightJam( uint64_t a, unsigned int count ) +{ + return + ( count < 64 ) + ? a>>count | ( (uint64_t) ( a<<( ( - count ) & 63 ) ) != 0 ) + : ( a != 0 ); +} +#else +uint64_t softfloat_shift64RightJam( uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shift count is less than 64. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE uint64_t + softfloat_shortShift64RightJam( uint64_t a, unsigned int count ) + { return a>>count | ( ( a & ( ( (uint64_t) 1<<count ) - 1 ) ) != 0 ); } +#else +uint64_t softfloat_shortShift64RightJam( uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64 +| _plus_ the number of bits given in `count'. The shifted result is at most +| 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The +| bits shifted off form a second 64-bit result as follows: The _last_ bit +| shifted off is the most-significant bit of the extra result, and the other +| 63 bits of the extra result are all zero if and only if _all_but_the_last_ +| bits shifted off were all zero. This extra result is stored in the location +| pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. +| (This routine makes more sense if `a0' and `a1' are considered to form +| a fixed-point value with binary point between `a0' and `a1'. This fixed- +| point value is shifted right by the number of bits given in `count', and +| the integer part of the result is returned at the location pointed to by +| `z0Ptr'. The fractional part of the result may be slightly corrupted as +| described above, and is returned at the location pointed to by `z1Ptr'.) +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE struct uint64_extra + softfloat_shift64ExtraRightJam( + uint64_t a, uint64_t extra, unsigned int count ) +{ + struct uint64_extra z; + if ( count < 64 ) { + z.v = a>>count; + z.extra = a<<( ( - count ) & 63 ); + } else { + z.v = 0; + z.extra = ( count == 64 ) ? 
a : ( a != 0 ); + } + z.extra |= ( extra != 0 ); + return z; +} +#else +struct uint64_extra + softfloat_shift64ExtraRightJam( uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shift count is less than 64. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE struct uint64_extra + softfloat_shortShift64ExtraRightJam( + uint64_t a, uint64_t extra, unsigned int count ) +{ + struct uint64_extra z; + z.v = a>>count; + z.extra = a<<( ( - count ) & 63 ) | ( extra != 0 ); + return z; +} +#else +struct uint64_extra + softfloat_shortShift64ExtraRightJam( uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +| number of bits given in `count'. Any bits shifted off are lost. The value +| of `count' can be arbitrarily large; in particular, if `count' is greater +| than 128, the result will be 0. The result is broken into two 64-bit pieces +| which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ +/*---------------------------------------------------------------------------- +| Shift count is less than 64. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE struct uint128 + softfloat_shortShift128Right( uint64_t a64, uint64_t a0, unsigned int count ) +{ + struct uint128 z; + z.v64 = a64>>count; + z.v0 = a64<<( ( - count ) & 63 ) | a0>>count; + return z; +} +#else +struct uint128 + softfloat_shortShift128Right( uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +| number of bits given in `count'. If any nonzero bits are shifted off, they +| are ``jammed'' into the least significant bit of the result by setting the +| least significant bit to 1. The value of `count' can be arbitrarily large; +| in particular, if `count' is greater than 128, the result will be either +| 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or +| nonzero. The result is broken into two 64-bit pieces which are stored at +| the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL ) +INLINE struct uint128 + softfloat_shift128RightJam( uint64_t a64, uint64_t a0, unsigned int count ) +{ + unsigned int negCount; + struct uint128 z; + if ( count < 64 ) { + negCount = - count; + z.v64 = a64>>( count & 63 ); + z.v0 = + a64<<( negCount & 63 ) | a0>>count + | ( (uint64_t) ( a0<<( negCount & 63 ) ) != 0 ); + } else { + z.v64 = 0; + z.v0 = + ( count < 128 ) + ? 
a64>>( count & 63 ) + | ( ( ( a64 & ( ( (uint64_t) 1<<( count & 63 ) ) - 1 ) ) + | a0 ) + != 0 ) + : ( ( a64 | a0 ) != 0 ); + } + return z; +} +#else +struct uint128 + softfloat_shift128RightJam( uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right +| by 64 _plus_ the number of bits given in `count'. The shifted result is +| at most 128 nonzero bits; these are broken into two 64-bit pieces which are +| stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted +| off form a third 64-bit result as follows: The _last_ bit shifted off is +| the most-significant bit of the extra result, and the other 63 bits of the +| extra result are all zero if and only if _all_but_the_last_ bits shifted off +| were all zero. This extra result is stored in the location pointed to by +| `z2Ptr'. The value of `count' can be arbitrarily large. +| (This routine makes more sense if `a0', `a1', and `a2' are considered +| to form a fixed-point value with binary point between `a1' and `a2'. This +| fixed-point value is shifted right by the number of bits given in `count', +| and the integer part of the result is returned at the locations pointed to +| by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly +| corrupted as described above, and is returned at the location pointed to by +| `z2Ptr'.) 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 5 <= INLINE_LEVEL ) +INLINE struct uint128_extra + softfloat_shift128ExtraRightJam( + uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count ) +{ + unsigned int negCount = - count; + struct uint128_extra z; + if ( count < 64 ) { + z.v64 = a64>>count; + z.v0 = a64<<( negCount & 63 ) | a0>>count; + z.extra = a0<<( negCount & 63 ); + } else { + z.v64 = 0; + if ( count == 64 ) { + z.v0 = a64; + z.extra = a0; + } else { + extra |= a0; + if ( count < 128 ) { + z.v0 = a64>>( count & 63 ); + z.extra = a64<<( negCount & 63 ); + } else { + z.v0 = 0; + z.extra = ( count == 128 ) ? a64 : ( a64 != 0 ); + } + } + } + z.extra |= ( extra != 0 ); + return z; +} +#else +struct uint128_extra + softfloat_shift128ExtraRightJam( uint64_t, uint64_t, uint64_t, unsigned int ); +#endif + +/*---------------------------------------------------------------------------- +| Shift count is less than 64. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE struct uint128_extra + softfloat_shortShift128ExtraRightJam( + uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count ) +{ + unsigned int negCount = - count; + struct uint128_extra z; + z.v64 = a64>>count; + z.v0 = a64<<( negCount & 63 ) | a0>>count; + z.extra = a0<<( negCount & 63 ) | ( extra != 0 ); + return z; +} +#else +struct uint128_extra + softfloat_shortShift128ExtraRightJam( + uint64_t, uint64_t, uint64_t, unsigned int ); +#endif + +extern const uint8_t softfloat_countLeadingZeros8[ 256 ]; + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| `a'. If `a' is zero, 32 is returned. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE int softfloat_countLeadingZeros32( uint32_t a ) +{ + int count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[ a>>24 ]; + return count; +} +#else +int softfloat_countLeadingZeros32( uint32_t ); +#endif + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| `a'. If `a' is zero, 64 is returned. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL ) +INLINE int softfloat_countLeadingZeros64( uint64_t a ) +{ + int count = 32; + uint32_t a32 = a; + if ( UINT64_C( 0x100000000 ) <= a ) { + count = 0; + a32 = a>>32; + } + /*------------------------------------------------------------------------ + | From here, result is current count + count leading zeros of `a32'. + *------------------------------------------------------------------------*/ + if ( a32 < 0x10000 ) { + count += 16; + a32 <<= 16; + } + if ( a32 < 0x1000000 ) { + count += 8; + a32 <<= 8; + } + count += softfloat_countLeadingZeros8[ a32>>24 ]; + return count; +} +#else +int softfloat_countLeadingZeros64( uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit +| value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so +| any carry out is lost. The result is broken into two 64-bit pieces which +| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 + b0; + z.v64 = a64 + b64; + z.v64 += ( z.v0 < a0 ); + return z; +} +#else +struct uint128 softfloat_add128( uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the +| 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is +| modulo 2^192, so any carry out is lost. The result is broken into three +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr', +| `z1Ptr', and `z2Ptr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE struct uint192 + softfloat_add192( + uint64_t a128, + uint64_t a64, + uint64_t a0, + uint64_t b128, + uint64_t b64, + uint64_t b0 + ) +{ + struct uint192 z; + unsigned int carry64, carry128; + z.v0 = a0 + b0; + carry64 = ( z.v0 < a0 ); + z.v64 = a64 + b64; + carry128 = ( z.v64 < a64 ); + z.v128 = a128 + b128; + z.v64 += carry64; + carry128 += ( z.v64 < carry64 ); + z.v128 += carry128; + return z; +} +#else +struct uint192 + softfloat_add192( + uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the +| 128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo +| 2^128, so any borrow out (carry out) is lost. The result is broken into two +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and +| `z1Ptr'. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 2 <= INLINE_LEVEL ) +INLINE struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 - b0; + z.v64 = a64 - b64; + z.v64 -= ( a0 < b0 ); + return z; +} +#else +struct uint128 softfloat_sub128( uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2' +| from the 192-bit value formed by concatenating `a0', `a1', and `a2'. +| Subtraction is modulo 2^192, so any borrow out (carry out) is lost. The +| result is broken into three 64-bit pieces which are stored at the locations +| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 3 <= INLINE_LEVEL ) +INLINE struct uint192 + softfloat_sub192( + uint64_t a128, + uint64_t a64, + uint64_t a0, + uint64_t b128, + uint64_t b64, + uint64_t b0 + ) +{ + struct uint192 z; + unsigned int borrow64, borrow128; + z.v0 = a0 - b0; + borrow64 = ( a0 < b0 ); + z.v64 = a64 - b64; + borrow128 = ( a64 < b64 ); + z.v128 = a128 - b128; + borrow128 += ( z.v64 < borrow64 ); + z.v64 -= borrow64; + z.v128 -= borrow128; + return z; +} +#else +struct uint192 + softfloat_sub192( + uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Multiplies `a' by `b' to obtain a 128-bit product. The product is broken +| into two 64-bit pieces which are stored at the locations pointed to by +| `z0Ptr' and `z1Ptr'. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && ( 4 <= INLINE_LEVEL ) +INLINE struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ) +{ + uint32_t a32 = a>>32; + uint32_t a0 = a; + uint32_t b32 = b>>32; + uint32_t b0 = b; + struct uint128 z; + uint64_t mid1, mid2, mid; + z.v0 = (uint64_t) a0 * b0; + mid1 = (uint64_t) a32 * b0; + mid2 = (uint64_t) a0 * b32; + z.v64 = (uint64_t) a32 * b32; + mid = mid1 + mid2; + z.v64 += ( (uint64_t) ( mid < mid1 ) )<<32 | mid>>32; + mid <<= 32; + z.v0 += mid; + z.v64 += ( z.v0 < mid ); + return z; +} +#else +struct uint128 softfloat_mul64To128( uint64_t, uint64_t ); +#endif + +/*---------------------------------------------------------------------------- +| Multiplies the 128-bit value formed by concatenating `a0' and `a1' by +| `b' to obtain a 192-bit product. The product is broken into three 64-bit +| pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and +| `z2Ptr'. +*----------------------------------------------------------------------------*/ +struct uint192 softfloat_mul128By64To192( uint64_t, uint64_t, uint64_t ); +/*---------------------------------------------------------------------------- +| Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the +| 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit +| product. The product is broken into four 64-bit pieces which are stored at +| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'. +*----------------------------------------------------------------------------*/ +struct uint256 softfloat_mul128To256( uint64_t, uint64_t, uint64_t, uint64_t ); + +/*---------------------------------------------------------------------------- +| Returns an approximation to the 64-bit integer quotient obtained by dividing +| `b' into the 128-bit value formed by concatenating `a0' and `a1'. The +| divisor `b' must be at least 2^63. 
If q is the exact quotient truncated +| toward zero, the approximation returned lies between q and q + 2 inclusive. +| If the exact quotient q is larger than 64 bits, the maximum positive 64-bit +| unsigned integer is returned. +*----------------------------------------------------------------------------*/ +uint64_t softfloat_estimateDiv128To64( uint64_t, uint64_t, uint64_t ); + +/*---------------------------------------------------------------------------- +| Returns an approximation to the square root of the 32-bit significand given +| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of +| `aExp' (the least significant bit) is 1, the integer returned approximates +| 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' +| is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either +| case, the approximation returned lies strictly within +/-2 of the exact +| value. +*----------------------------------------------------------------------------*/ +uint32_t softfloat_estimateSqrt32( unsigned int, uint32_t ); + diff --git a/softfloat/s_add128.c b/softfloat/s_add128.c new file mode 100755 index 0000000..59c0348 --- /dev/null +++ b/softfloat/s_add128.c @@ -0,0 +1,17 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128
+ softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+ struct uint128 z;
+
+ z.v0 = a0 + b0;
+ z.v64 = a64 + b64;
+ z.v64 += ( z.v0 < a0 );
+ return z;
+
+}
+
diff --git a/softfloat/s_add192.c b/softfloat/s_add192.c new file mode 100755 index 0000000..543eb5d --- /dev/null +++ b/softfloat/s_add192.c @@ -0,0 +1,30 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint192
+ softfloat_add192(
+ uint64_t a128,
+ uint64_t a64,
+ uint64_t a0,
+ uint64_t b128,
+ uint64_t b64,
+ uint64_t b0
+ )
+{
+ struct uint192 z;
+ unsigned int carry64, carry128;
+
+ z.v0 = a0 + b0;
+ carry64 = ( z.v0 < a0 );
+ z.v64 = a64 + b64;
+ carry128 = ( z.v64 < a64 );
+ z.v128 = a128 + b128;
+ z.v64 += carry64;
+ carry128 += ( z.v64 < carry64 );
+ z.v128 += carry128;
+ return z;
+
+}
+
diff --git a/softfloat/s_addMagsF32.c b/softfloat/s_addMagsF32.c new file mode 100755 index 0000000..f361e2b --- /dev/null +++ b/softfloat/s_addMagsF32.c @@ -0,0 +1,75 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+
+float32_t
+ softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB, bool signZ )
+{
+ int_fast16_t expA;
+ uint_fast32_t sigA;
+ int_fast16_t expB;
+ uint_fast32_t sigB;
+ int_fast16_t expDiff;
+ uint_fast32_t uiZ;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ;
+ union ui32_f32 uZ;
+
+ expA = expF32UI( uiA );
+ sigA = fracF32UI( uiA );
+ expB = expF32UI( uiB );
+ sigB = fracF32UI( uiB );
+ expDiff = expA - expB;
+ sigA <<= 6;
+ sigB <<= 6;
+ if ( ! expDiff ) {
+ if ( expA == 0xFF ) {
+ if ( sigA | sigB ) goto propagateNaN;
+ uiZ = uiA;
+ goto uiZ;
+ }
+ if ( ! expA ) {
+ uiZ = packToF32UI( signZ, 0, ( uiA + uiB ) & 0x7FFFFFFF );
+ goto uiZ;
+ }
+ expZ = expA;
+ sigZ = 0x40000000 + sigA + sigB;
+ } else {
+ if ( expDiff < 0 ) {
+ if ( expB == 0xFF ) {
+ if ( sigB ) goto propagateNaN;
+ uiZ = packToF32UI( signZ, 0xFF, 0 );
+ goto uiZ;
+ }
+ expZ = expB;
+ sigA += expA ? 0x20000000 : sigA;
+ sigA = softfloat_shift32RightJam( sigA, - expDiff );
+ } else {
+ if ( expA == 0xFF ) {
+ if ( sigA ) goto propagateNaN;
+ uiZ = uiA;
+ goto uiZ;
+ }
+ expZ = expA;
+ sigB += expB ? 0x20000000 : sigB;
+ sigB = softfloat_shift32RightJam( sigB, expDiff );
+ }
+ sigZ = 0x20000000 + sigA + sigB;
+ if ( sigZ < 0x40000000 ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ }
+ return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN:
+ uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/s_addMagsF64.c b/softfloat/s_addMagsF64.c new file mode 100755 index 0000000..a81c3e4 --- /dev/null +++ b/softfloat/s_addMagsF64.c @@ -0,0 +1,77 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" + +float64_t + softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + expDiff = expA - expB; + sigA <<= 9; + sigB <<= 9; + if ( ! expDiff ) { + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + if ( ! expA ) { + uiZ = + packToF64UI( + signZ, 0, ( uiA + uiB ) & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ); + goto uiZ; + } + expZ = expA; + sigZ = UINT64_C( 0x4000000000000000 ) + sigA + sigB; + } else { + if ( expDiff < 0 ) { + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + expZ = expB; + sigA += expA ? UINT64_C( 0x2000000000000000 ) : sigA; + sigA = softfloat_shift64RightJam( sigA, - expDiff ); + } else { + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigB += expB ? 
UINT64_C( 0x2000000000000000 ) : sigB; + sigB = softfloat_shift64RightJam( sigB, expDiff ); + } + sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB; + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/s_countLeadingZeros32.c b/softfloat/s_countLeadingZeros32.c new file mode 100755 index 0000000..0bd17e1 --- /dev/null +++ b/softfloat/s_countLeadingZeros32.c @@ -0,0 +1,22 @@ + +#include <stdint.h> +#include "primitives.h" + +int softfloat_countLeadingZeros32( uint32_t a ) +{ + int count; + + count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[ a>>24 ]; + return count; + +} + diff --git a/softfloat/s_countLeadingZeros64.c b/softfloat/s_countLeadingZeros64.c new file mode 100755 index 0000000..79f4280 --- /dev/null +++ b/softfloat/s_countLeadingZeros64.c @@ -0,0 +1,32 @@ + +#include <stdint.h> +#include "primitives.h" +#include "platform.h" + +int softfloat_countLeadingZeros64( uint64_t a ) +{ + int count; + uint32_t a32; + + count = 32; + a32 = a; + if ( UINT64_C( 0x100000000 ) <= a ) { + count = 0; + a32 = a>>32; + } + /*------------------------------------------------------------------------ + | From here, result is current count + count leading zeros of `a32'. 
+ *------------------------------------------------------------------------*/ + if ( a32 < 0x10000 ) { + count += 16; + a32 <<= 16; + } + if ( a32 < 0x1000000 ) { + count += 8; + a32 <<= 8; + } + count += softfloat_countLeadingZeros8[ a32>>24 ]; + return count; + +} + diff --git a/softfloat/s_countLeadingZeros8.c b/softfloat/s_countLeadingZeros8.c new file mode 100755 index 0000000..4eca7e9 --- /dev/null +++ b/softfloat/s_countLeadingZeros8.c @@ -0,0 +1,24 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +const uint8_t softfloat_countLeadingZeros8[ 256 ] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + diff --git a/softfloat/s_eq128.c b/softfloat/s_eq128.c new file mode 100755 index 0000000..7261dc4 --- /dev/null +++ b/softfloat/s_eq128.c @@ -0,0 +1,13 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + + return ( a64 == b64 ) && ( a0 == b0 ); + +} + diff --git a/softfloat/s_estimateDiv128To64.c b/softfloat/s_estimateDiv128To64.c new file mode 100755 index 0000000..f8610a2 --- /dev/null +++ b/softfloat/s_estimateDiv128To64.c @@ -0,0 +1,28 @@ + +#include <stdint.h> 
+#include "platform.h" +#include "primitives.h" + +uint64_t softfloat_estimateDiv128To64( uint64_t a64, uint64_t a0, uint64_t b ) +{ + uint32_t b32; + uint64_t z; + struct uint128 term, rem; + uint64_t rem32; + + if ( b <= a64 ) return UINT64_C( 0xFFFFFFFFFFFFFFFF ); + b32 = b>>32; + z = ( (uint64_t) b32<<32 <= a64 ) ? UINT64_C( 0xFFFFFFFF00000000 ) + : ( a64 / b32 )<<32; + term = softfloat_mul64To128( b, z ); + rem = softfloat_sub128( a64, a0, term.v64, term.v0 ); + while ( UINT64_C( 0x8000000000000000 ) <= rem.v64 ) { + z -= UINT64_C( 0x100000000 ); + rem = softfloat_add128( rem.v64, rem.v0, b32, (uint64_t) ( b<<32 ) ); + } + rem32 = ( rem.v64<<32 ) | ( rem.v0>>32 ); + z |= ( (uint64_t) b32<<32 <= rem32 ) ? 0xFFFFFFFF : rem32 / b32; + return z; + +} + diff --git a/softfloat/s_estimateSqrt32.c b/softfloat/s_estimateSqrt32.c new file mode 100755 index 0000000..e22a9dc --- /dev/null +++ b/softfloat/s_estimateSqrt32.c @@ -0,0 +1,37 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +uint32_t softfloat_estimateSqrt32( unsigned int expA, uint32_t a ) +{ + static const uint16_t sqrtOddAdjustments[] = { + 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, + 0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67 + }; + static const uint16_t sqrtEvenAdjustments[] = { + 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, + 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 + }; + int index; + uint32_t z; + union { uint32_t ui; int32_t i; } u32; + + index = ( a>>27 ) & 15; + if ( expA & 1 ) { + z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ]; + z = ( ( a / z )<<14 ) + ( z<<15 ); + a >>= 1; + } else { + z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ]; + z = a / z + z; + z = ( 0x20000 <= z ) ? 
0xFFFF8000 : z<<15; + if ( z <= a ) { + u32.ui = a; + return u32.i>>1; + } + } + return (uint32_t) ( ( (uint64_t) a<<31 ) / z ) + ( z>>1 ); + +} + diff --git a/softfloat/s_le128.c b/softfloat/s_le128.c new file mode 100755 index 0000000..83b1d7f --- /dev/null +++ b/softfloat/s_le128.c @@ -0,0 +1,13 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+ return ( a64 < b64 ) || ( ( a64 == b64 ) && ( a0 <= b0 ) );
+
+}
+
diff --git a/softfloat/s_lt128.c b/softfloat/s_lt128.c new file mode 100755 index 0000000..33a3df4 --- /dev/null +++ b/softfloat/s_lt128.c @@ -0,0 +1,13 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+ return ( a64 < b64 ) || ( ( a64 == b64 ) && ( a0 < b0 ) );
+
+}
+
diff --git a/softfloat/s_mul128By64To192.c b/softfloat/s_mul128By64To192.c new file mode 100755 index 0000000..dfa8825 --- /dev/null +++ b/softfloat/s_mul128By64To192.c @@ -0,0 +1,20 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint192
+ softfloat_mul128By64To192( uint64_t a64, uint64_t a0, uint64_t b )
+{
+ struct uint128 p0, p64;
+ struct uint192 z;
+
+ p0 = softfloat_mul64To128( a0, b );
+ z.v0 = p0.v0;
+ p64 = softfloat_mul64To128( a64, b );
+ z.v64 = p64.v0 + p0.v64;
+ z.v128 = p64.v64 + ( z.v64 < p64.v0 );
+ return z;
+
+}
+
diff --git a/softfloat/s_mul128To256.c b/softfloat/s_mul128To256.c new file mode 100755 index 0000000..a96cd94 --- /dev/null +++ b/softfloat/s_mul128To256.c @@ -0,0 +1,28 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint256
+ softfloat_mul128To256( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+ struct uint128 p0, p64, p128;
+ struct uint256 z;
+
+ p0 = softfloat_mul64To128( a0, b0 );
+ z.v0 = p0.v0;
+ p64 = softfloat_mul64To128( a64, b0 );
+ z.v64 = p64.v0 + p0.v64;
+ z.v128 = p64.v64 + ( z.v64 < p64.v0 );
+ p128 = softfloat_mul64To128( a64, b64 );
+ z.v128 += p128.v0;
+ z.v192 = p128.v64 + ( z.v128 < p128.v0 );
+ p64 = softfloat_mul64To128( a0, b64 );
+ z.v64 += p64.v0;
+ p64.v64 += ( z.v64 < p64.v0 );
+ z.v128 += p64.v64;
+ z.v192 += ( z.v128 < p64.v64 );
+ return z;
+
+}
+
diff --git a/softfloat/s_mul64To128.c b/softfloat/s_mul64To128.c new file mode 100755 index 0000000..c17780b --- /dev/null +++ b/softfloat/s_mul64To128.c @@ -0,0 +1,28 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
+{
+ uint32_t a32, a0, b32, b0;
+ struct uint128 z;
+ uint64_t mid1, mid2, mid;
+
+ a32 = a>>32;
+ a0 = a;
+ b32 = b>>32;
+ b0 = b;
+ z.v0 = (uint64_t) a0 * b0;
+ mid1 = (uint64_t) a32 * b0;
+ mid2 = (uint64_t) a0 * b32;
+ z.v64 = (uint64_t) a32 * b32;
+ mid = mid1 + mid2;
+ z.v64 += ( (uint64_t) ( mid < mid1 ) )<<32 | mid>>32;
+ mid <<= 32;
+ z.v0 += mid;
+ z.v64 += ( z.v0 < mid );
+ return z;
+
+}
+
diff --git a/softfloat/s_mulAddF32.c b/softfloat/s_mulAddF32.c new file mode 100755 index 0000000..e55a0ba --- /dev/null +++ b/softfloat/s_mulAddF32.c @@ -0,0 +1,171 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_mulAddF32(
+ int op, uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC )
+{
+ bool signA;
+ int_fast16_t expA;
+ uint_fast32_t sigA;
+ bool signB;
+ int_fast16_t expB;
+ uint_fast32_t sigB;
+ bool signC;
+ int_fast16_t expC;
+ uint_fast32_t sigC;
+ bool signProd;
+ uint_fast32_t magBits, uiZ;
+ struct exp16_sig32 normExpSig;
+ int_fast16_t expProd;
+ uint_fast64_t sigProd;
+ bool signZ;
+ int_fast16_t expZ;
+ uint_fast32_t sigZ;
+ int_fast16_t expDiff;
+ uint_fast64_t sigZ64, sigC64;
+ int shiftCount;
+ union ui32_f32 uZ;
+
+ signA = signF32UI( uiA );
+ expA = expF32UI( uiA );
+ sigA = fracF32UI( uiA );
+ signB = signF32UI( uiB );
+ expB = expF32UI( uiB );
+ sigB = fracF32UI( uiB );
+ signC = signF32UI( uiC ) ^ ( op == softfloat_mulAdd_subC );
+ expC = expF32UI( uiC );
+ sigC = fracF32UI( uiC );
+ signProd = signA ^ signB ^ ( op == softfloat_mulAdd_subProd );
+ if ( expA == 0xFF ) {
+ if ( sigA || ( ( expB == 0xFF ) && sigB ) ) goto propagateNaN_ABC;
+ magBits = expB | sigB;
+ goto infProdArg;
+ }
+ if ( expB == 0xFF ) {
+ if ( sigB ) goto propagateNaN_ABC;
+ magBits = expA | sigA;
+ goto infProdArg;
+ }
+ if ( expC == 0xFF ) {
+ if ( sigC ) {
+ uiZ = 0;
+ goto propagateNaN_ZC;
+ }
+ uiZ = uiC;
+ goto uiZ;
+ }
+ if ( ! expA ) {
+ if ( ! sigA ) goto zeroProd;
+ normExpSig = softfloat_normSubnormalF32Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ if ( ! expB ) {
+ if ( ! sigB ) goto zeroProd;
+ normExpSig = softfloat_normSubnormalF32Sig( sigB );
+ expB = normExpSig.exp;
+ sigB = normExpSig.sig;
+ }
+ expProd = expA + expB - 0x7E;
+ sigA = ( sigA | 0x00800000 )<<7;
+ sigB = ( sigB | 0x00800000 )<<7;
+ sigProd = (uint_fast64_t) sigA * sigB;
+ if ( sigProd < UINT64_C( 0x2000000000000000 ) ) {
+ --expProd;
+ sigProd <<= 1;
+ }
+ signZ = signProd;
+ if ( ! expC ) {
+ if ( ! sigC ) {
+ expZ = expProd - 1;
+ sigZ = softfloat_shortShift64RightJam( sigProd, 31 );
+ goto roundPack;
+ }
+ normExpSig = softfloat_normSubnormalF32Sig( sigC );
+ expC = normExpSig.exp;
+ sigC = normExpSig.sig;
+ }
+ sigC = ( sigC | 0x00800000 )<<6;
+ expDiff = expProd - expC;
+ if ( signProd == signC ) {
+ if ( expDiff <= 0 ) {
+ expZ = expC;
+ sigZ = sigC + softfloat_shift64RightJam( sigProd, 32 - expDiff );
+ } else {
+ expZ = expProd;
+ sigZ64 =
+ sigProd
+ + softfloat_shift64RightJam(
+ (uint_fast64_t) sigC<<32, expDiff );
+ sigZ = softfloat_shortShift64RightJam( sigZ64, 32 );
+ }
+ if ( sigZ < 0x40000000 ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ } else {
+/*** OPTIMIZE BETTER? ***/
+ sigC64 = (uint_fast64_t) sigC<<32;
+ if ( expDiff < 0 ) {
+ signZ = signC;
+ expZ = expC;
+ sigZ64 = sigC64 - softfloat_shift64RightJam( sigProd, - expDiff );
+ } else if ( ! expDiff ) {
+ expZ = expProd;
+ sigZ64 = sigProd - sigC64;
+ if ( ! sigZ64 ) goto completeCancellation;
+ if ( sigZ64 & UINT64_C( 0x8000000000000000 ) ) {
+ signZ ^= 1;
+ sigZ64 = - sigZ64;
+ }
+ } else {
+ expZ = expProd;
+ sigZ64 = sigProd - softfloat_shift64RightJam( sigC64, expDiff );
+ }
+ shiftCount = softfloat_countLeadingZeros64( sigZ64 ) - 1;
+ expZ -= shiftCount;
+ shiftCount -= 32;
+ if ( shiftCount < 0 ) {
+ sigZ = softfloat_shortShift64RightJam( sigZ64, - shiftCount );
+ } else {
+ sigZ = (uint_fast32_t) sigZ64<<shiftCount;
+ }
+ }
+ roundPack:
+ return softfloat_roundPackToF32( signZ, expZ, sigZ );
+ propagateNaN_ABC:
+ uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ goto propagateNaN_ZC;
+ infProdArg:
+ if ( magBits ) {
+ uiZ = packToF32UI( signProd, 0xFF, 0 );
+ if ( expC != 0xFF ) goto uiZ;
+ if ( sigC ) goto propagateNaN_ZC;
+ if ( signProd == signC ) goto uiZ;
+ }
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF32UI;
+ propagateNaN_ZC:
+ uiZ = softfloat_propagateNaNF32UI( uiZ, uiC );
+ goto uiZ;
+ zeroProd:
+ uiZ = uiC;
+ if ( ! ( expC | sigC ) && ( signProd != signC ) ) {
+ completeCancellation:
+ uiZ =
+ packToF32UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );
+ }
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/s_mulAddF64.c b/softfloat/s_mulAddF64.c new file mode 100755 index 0000000..01ba3b4 --- /dev/null +++ b/softfloat/s_mulAddF64.c @@ -0,0 +1,188 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_mulAddF64(
+ int op, uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC )
+{
+ bool signA;
+ int_fast16_t expA;
+ uint_fast64_t sigA;
+ bool signB;
+ int_fast16_t expB;
+ uint_fast64_t sigB;
+ bool signC;
+ int_fast16_t expC;
+ uint_fast64_t sigC;
+ bool signProd;
+ uint_fast64_t magBits, uiZ;
+ struct exp16_sig64 normExpSig;
+ int_fast16_t expProd;
+ struct uint128 sigProd;
+ bool signZ;
+ int_fast16_t expZ;
+ uint_fast64_t sigZ;
+ int_fast16_t expDiff;
+ struct uint128 sigC128, sigZ128;
+ int shiftCount;
+ union ui64_f64 uZ;
+
+ signA = signF64UI( uiA );
+ expA = expF64UI( uiA );
+ sigA = fracF64UI( uiA );
+ signB = signF64UI( uiB );
+ expB = expF64UI( uiB );
+ sigB = fracF64UI( uiB );
+ signC = signF64UI( uiC ) ^ ( op == softfloat_mulAdd_subC );
+ expC = expF64UI( uiC );
+ sigC = fracF64UI( uiC );
+ signProd = signA ^ signB ^ ( op == softfloat_mulAdd_subProd );
+ if ( expA == 0x7FF ) {
+ if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN_ABC;
+ magBits = expB | sigB;
+ goto infProdArg;
+ }
+ if ( expB == 0x7FF ) {
+ if ( sigB ) goto propagateNaN_ABC;
+ magBits = expA | sigA;
+ goto infProdArg;
+ }
+ if ( expC == 0x7FF ) {
+ if ( sigC ) {
+ uiZ = 0;
+ goto propagateNaN_ZC;
+ }
+ uiZ = uiC;
+ goto uiZ;
+ }
+ if ( ! expA ) {
+ if ( ! sigA ) goto zeroProd;
+ normExpSig = softfloat_normSubnormalF64Sig( sigA );
+ expA = normExpSig.exp;
+ sigA = normExpSig.sig;
+ }
+ if ( ! expB ) {
+ if ( ! sigB ) goto zeroProd;
+ normExpSig = softfloat_normSubnormalF64Sig( sigB );
+ expB = normExpSig.exp;
+ sigB = normExpSig.sig;
+ }
+ expProd = expA + expB - 0x3FE;
+ sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;
+ sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<10;
+ sigProd = softfloat_mul64To128( sigA, sigB );
+ if ( sigProd.v64 < UINT64_C( 0x2000000000000000 ) ) {
+ --expProd;
+ sigProd = softfloat_shortShift128Left( sigProd.v64, sigProd.v0, 1 );
+ }
+ signZ = signProd;
+ if ( ! expC ) {
+ if ( ! sigC ) {
+ expZ = expProd - 1;
+ sigZ = sigProd.v64<<1 | ( sigProd.v0 != 0 );
+ goto roundPack;
+ }
+ normExpSig = softfloat_normSubnormalF64Sig( sigC );
+ expC = normExpSig.exp;
+ sigC = normExpSig.sig;
+ }
+ sigC = ( sigC | UINT64_C( 0x0010000000000000 ) )<<9;
+ expDiff = expProd - expC;
+ if ( signProd == signC ) {
+ if ( expDiff <= 0 ) {
+ expZ = expC;
+ if ( expDiff ) {
+ sigProd.v64 =
+ softfloat_shift64RightJam( sigProd.v64, - expDiff );
+ }
+ sigZ = ( sigC + sigProd.v64 ) | ( sigProd.v0 != 0 );
+ } else {
+ expZ = expProd;
+ sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
+ sigZ128 =
+ softfloat_add128(
+ sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
+ sigZ = sigZ128.v64 | ( sigZ128.v0 != 0 );
+ }
+ if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
+ --expZ;
+ sigZ <<= 1;
+ }
+ } else {
+/*** OPTIMIZE BETTER? ***/
+ if ( expDiff < 0 ) {
+ signZ = signC;
+ expZ = expC;
+ sigProd =
+ softfloat_shift128RightJam(
+ sigProd.v64, sigProd.v0, - expDiff );
+ sigZ128 = softfloat_sub128( sigC, 0, sigProd.v64, sigProd.v0 );
+ } else if ( ! expDiff ) {
+ expZ = expProd;
+ sigZ128 = softfloat_sub128( sigProd.v64, sigProd.v0, sigC, 0 );
+ if ( ! ( sigZ128.v64 | sigZ128.v0 ) ) goto completeCancellation;
+ if ( sigZ128.v64 & UINT64_C( 0x8000000000000000 ) ) {
+ signZ ^= 1;
+ sigZ128 = softfloat_sub128( 0, 0, sigZ128.v64, sigZ128.v0 );
+ }
+ } else {
+ expZ = expProd;
+ sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
+ sigZ128 =
+ softfloat_sub128(
+ sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
+ }
+ if ( ! sigZ128.v64 ) {
+ expZ -= 64;
+ sigZ128.v64 = sigZ128.v0;
+ sigZ128.v0 = 0;
+ }
+ shiftCount = softfloat_countLeadingZeros64( sigZ128.v64 ) - 1;
+ expZ -= shiftCount;
+ if ( shiftCount < 0 ) {
+ sigZ = softfloat_shortShift64RightJam( sigZ128.v64, - shiftCount );
+ } else {
+ sigZ128 =
+ softfloat_shortShift128Left(
+ sigZ128.v64, sigZ128.v0, shiftCount );
+ sigZ = sigZ128.v64;
+ }
+ sigZ |= ( sigZ128.v0 != 0 );
+ }
+ roundPack:
+ return softfloat_roundPackToF64( signZ, expZ, sigZ );
+ propagateNaN_ABC:
+ uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
+ goto propagateNaN_ZC;
+ infProdArg:
+ if ( magBits ) {
+ uiZ = packToF64UI( signProd, 0x7FF, 0 );
+ if ( expC != 0x7FF ) goto uiZ;
+ if ( sigC ) goto propagateNaN_ZC;
+ if ( signProd == signC ) goto uiZ;
+ }
+ invalid:
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ uiZ = defaultNaNF64UI;
+ propagateNaN_ZC:
+ uiZ = softfloat_propagateNaNF64UI( uiZ, uiC );
+ goto uiZ;
+ zeroProd:
+ uiZ = uiC;
+ if ( ! ( expC | sigC ) && ( signProd != signC ) ) {
+ completeCancellation:
+ uiZ =
+ packToF64UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );
+ }
+ uiZ:
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/s_normRoundPackToF32.c b/softfloat/s_normRoundPackToF32.c new file mode 100755 index 0000000..2e6f4b0 --- /dev/null +++ b/softfloat/s_normRoundPackToF32.c @@ -0,0 +1,24 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes `sig' and returns it rounded and packed as a single-precision
+| float.  `sig' is shifted left so its leading 1 bit lands in bit 30, and
+| `exp' is reduced by the same amount.  When at least 7 low bits are free
+| and the adjusted exponent is within the normal range, the value is packed
+| exactly with no rounding step; otherwise the general rounding routine is
+| used.  The `(uint16_t)' cast makes a negative `exp' compare as a large
+| value, so the single `< 0xFD' test rejects underflow and overflow at once.
+*----------------------------------------------------------------------------*/
+float32_t
+ softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+ int shiftCount;
+ union ui32_f32 uZ;
+
+ /* Position the most-significant 1 of `sig' at bit 30 (one guard bit
+    above the 7 rounding bits plus the 23-bit fraction plus leading 1). */
+ shiftCount = softfloat_countLeadingZeros32( sig ) - 1;
+ exp -= shiftCount;
+ if ( ( 7 <= shiftCount ) && ( (uint16_t) exp < 0xFD ) ) {
+ /* Exact case: dropping the 7 low bits loses nothing.  A zero `sig'
+    (clz gives 32, so shiftCount is 31) forces a zero exponent field. */
+ uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<( shiftCount - 7 ) );
+ return uZ.f;
+ } else {
+ return softfloat_roundPackToF32( sign, exp, sig<<shiftCount );
+ }
+
+}
+
diff --git a/softfloat/s_normRoundPackToF64.c b/softfloat/s_normRoundPackToF64.c new file mode 100755 index 0000000..64dced4 --- /dev/null +++ b/softfloat/s_normRoundPackToF64.c @@ -0,0 +1,24 @@ +
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes `sig' and returns it rounded and packed as a double-precision
+| float.  `sig' is shifted left so its leading 1 bit lands in bit 62, and
+| `exp' is reduced by the same amount.  When at least 10 low bits are free
+| and the adjusted exponent is within the normal range, the value is packed
+| exactly with no rounding step; otherwise the general rounding routine is
+| used.  The `(uint16_t)' cast makes a negative `exp' compare as a large
+| value, so the single `< 0x7FD' test rejects underflow and overflow at once.
+*----------------------------------------------------------------------------*/
+float64_t
+ softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+ int shiftCount;
+ union ui64_f64 uZ;
+
+ /* Position the most-significant 1 of `sig' at bit 62 (one guard bit
+    above the 10 rounding bits plus the 52-bit fraction plus leading 1). */
+ shiftCount = softfloat_countLeadingZeros64( sig ) - 1;
+ exp -= shiftCount;
+ if ( ( 10 <= shiftCount ) && ( (uint16_t) exp < 0x7FD ) ) {
+ /* Exact case: dropping the 10 low bits loses nothing.  A zero `sig'
+    (clz gives 64, so shiftCount is 63) forces a zero exponent field. */
+ uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<( shiftCount - 10 ) );
+ return uZ.f;
+ } else {
+ return softfloat_roundPackToF64( sign, exp, sig<<shiftCount );
+ }
+
+}
+
diff --git a/softfloat/s_normSubnormalF32Sig.c b/softfloat/s_normSubnormalF32Sig.c new file mode 100755 index 0000000..b98eb86 --- /dev/null +++ b/softfloat/s_normSubnormalF32Sig.c @@ -0,0 +1,18 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes the significand `sig' of a subnormal single-precision value,
+| returning the left-shifted significand together with the (possibly
+| negative) exponent that compensates for the shift.
+*----------------------------------------------------------------------------*/
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig )
+{
+ struct exp16_sig32 normed;
+ int dist;
+
+ /* Move the leading 1 of `sig' up to bit 23 (8 clear bits above it). */
+ dist = softfloat_countLeadingZeros32( sig ) - 8;
+ normed.sig = sig<<dist;
+ normed.exp = 1 - dist;
+ return normed;
+}
+
diff --git a/softfloat/s_normSubnormalF64Sig.c b/softfloat/s_normSubnormalF64Sig.c new file mode 100755 index 0000000..45a7c9e --- /dev/null +++ b/softfloat/s_normSubnormalF64Sig.c @@ -0,0 +1,18 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+
+/*----------------------------------------------------------------------------
+| Normalizes the significand `sig' of a subnormal double-precision value,
+| returning the left-shifted significand together with the (possibly
+| negative) exponent that compensates for the shift.
+*----------------------------------------------------------------------------*/
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig )
+{
+ struct exp16_sig64 normed;
+ int dist;
+
+ /* Move the leading 1 of `sig' up to bit 52 (11 clear bits above it). */
+ dist = softfloat_countLeadingZeros64( sig ) - 11;
+ normed.sig = sig<<dist;
+ normed.exp = 1 - dist;
+ return normed;
+}
+
diff --git a/softfloat/s_roundPackToF32.c b/softfloat/s_roundPackToF32.c new file mode 100755 index 0000000..11764f1 --- /dev/null +++ b/softfloat/s_roundPackToF32.c @@ -0,0 +1,65 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "softfloat.h" + +float32_t + softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + int roundingMode; + bool roundNearestEven; + int roundIncrement, roundBits; + bool isTiny; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + roundingMode = softfloat_roundingMode; + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + roundIncrement = 0x40; + if ( + ! roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + roundIncrement = + ( roundingMode == softfloat_round_minMag ) + || ( roundingMode + == ( sign ? softfloat_round_max : softfloat_round_min ) ) + ? 0 + : 0x7F; + } + roundBits = sig & 0x7F; + if ( 0xFD <= (uint16_t) exp ) { + if ( exp < 0 ) { + isTiny = + ( softfloat_detectTininess + == softfloat_tininess_beforeRounding ) + || ( exp < -1 ) + || ( sig + roundIncrement < 0x80000000 ); + sig = softfloat_shift32RightJam( sig, - exp ); + exp = 0; + roundBits = sig & 0x7F; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + ( 0xFD < exp ) || ( 0x80000000 <= sig + roundIncrement ) + ) { + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + if ( roundBits ) softfloat_exceptionFlags |= softfloat_flag_inexact; + sig = ( sig + roundIncrement )>>7; + sig &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven ); + uiZ = packToF32UI( sign, sig ? 
exp : 0, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/s_roundPackToF64.c b/softfloat/s_roundPackToF64.c new file mode 100755 index 0000000..fb0ef1d --- /dev/null +++ b/softfloat/s_roundPackToF64.c @@ -0,0 +1,66 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "softfloat.h" + +float64_t + softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + int roundingMode; + bool roundNearestEven; + int roundIncrement, roundBits; + bool isTiny; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + roundingMode = softfloat_roundingMode; + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + roundIncrement = 0x200; + if ( + ! roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + roundIncrement = + ( roundingMode == softfloat_round_minMag ) + || ( roundingMode + == ( sign ? softfloat_round_max : softfloat_round_min ) ) + ? 0 + : 0x3FF; + } + roundBits = sig & 0x3FF; + if ( 0x7FD <= (uint16_t) exp ) { + if ( exp < 0 ) { + isTiny = + ( softfloat_detectTininess + == softfloat_tininess_beforeRounding ) + || ( exp < -1 ) + || ( sig + roundIncrement < UINT64_C( 0x8000000000000000 ) ); + sig = softfloat_shift64RightJam( sig, - exp ); + exp = 0; + roundBits = sig & 0x3FF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + ( 0x7FD < exp ) + || ( UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement ) + ) { + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + if ( roundBits ) softfloat_exceptionFlags |= softfloat_flag_inexact; + sig = ( sig + roundIncrement )>>10; + sig &= ~ ( ! ( roundBits ^ 0x200 ) & roundNearestEven ); + uiZ = packToF64UI( sign, sig ? 
exp : 0, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/s_roundPackToI32.c b/softfloat/s_roundPackToI32.c new file mode 100755 index 0000000..1c91497 --- /dev/null +++ b/softfloat/s_roundPackToI32.c @@ -0,0 +1,48 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundPackToI32( + bool sign, uint_fast64_t sig, int_fast8_t roundingMode, bool exact ) +{ + bool roundNearestEven; + int roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + roundIncrement = 0x40; + if ( + ! roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + roundIncrement = + ( roundingMode == softfloat_round_minMag ) + || ( roundingMode + == ( sign ? softfloat_round_max : softfloat_round_min ) ) + ? 0 + : 0x7F; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + sig32 = sig>>7; + sig32 &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven ); + uZ.ui = sign ? - sig32 : sig32; + z = uZ.i; + if ( z && ( ( z < 0 ) ^ sign ) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? 
-0x7FFFFFFF - 1 : 0x7FFFFFFF; + +} + diff --git a/softfloat/s_roundPackToI64.c b/softfloat/s_roundPackToI64.c new file mode 100755 index 0000000..b2f5d63 --- /dev/null +++ b/softfloat/s_roundPackToI64.c @@ -0,0 +1,52 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundPackToI64( + bool sign, + uint_fast64_t sig64, + uint_fast64_t sig0, + int_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearestEven, increment; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + increment = ( UINT64_C( 0x8000000000000000 ) <= sig0 ); + if ( + ! roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + increment = + ( roundingMode != softfloat_round_minMag ) + && ( roundingMode + == ( sign ? softfloat_round_min : softfloat_round_max ) ) + && sig0; + } + if ( increment ) { + ++sig64; + if ( ! sig64 ) goto invalid; + sig64 &= + ~ ( ! ( sig0 & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + & roundNearestEven ); + } + uZ.ui = sign ? - sig64 : sig64; + z = uZ.i; + if ( z && ( ( z < 0 ) ^ sign ) ) goto invalid; + if ( exact && sig0 ) softfloat_exceptionFlags |= softfloat_flag_inexact; + return z; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + sign ? 
- INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1 + : INT64_C( 0x7FFFFFFFFFFFFFFF ); + +} + diff --git a/softfloat/s_roundPackToUI32.c b/softfloat/s_roundPackToUI32.c new file mode 100755 index 0000000..ab44ec7 --- /dev/null +++ b/softfloat/s_roundPackToUI32.c @@ -0,0 +1,44 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundPackToUI32( + bool sign, uint_fast64_t sig, int_fast8_t roundingMode, bool exact ) +{ + bool roundNearestEven; + int roundIncrement, roundBits; + uint_fast32_t z; + + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + roundIncrement = 0x40; + if ( + ! roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + roundIncrement = + ( roundingMode == softfloat_round_minMag ) + || ( roundingMode + == ( sign ? softfloat_round_max : softfloat_round_min ) ) + ? 0 + : 0x7F; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + z = sig>>7; + z &= ~ ( ! ( roundBits ^ 0x40 ) & roundNearestEven ); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return 0xFFFFFFFF; + +} + diff --git a/softfloat/s_roundPackToUI64.c b/softfloat/s_roundPackToUI64.c new file mode 100755 index 0000000..d42266f --- /dev/null +++ b/softfloat/s_roundPackToUI64.c @@ -0,0 +1,46 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundPackToUI64( + bool sign, + uint_fast64_t sig64, + uint_fast64_t sig0, + int_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearestEven, increment; + + roundNearestEven = ( roundingMode == softfloat_round_nearest_even ); + increment = ( UINT64_C( 0x8000000000000000 ) <= sig0 ); + if ( + ! 
roundNearestEven + && ( roundingMode != softfloat_round_nearest_maxMag ) + ) { + increment = + ( roundingMode != softfloat_round_minMag ) + && ( roundingMode + == ( sign ? softfloat_round_min : softfloat_round_max ) ) + && sig0; + } + if ( increment ) { + ++sig64; + if ( ! sig64 ) goto invalid; + sig64 &= + ~ ( ! ( sig0 & UINT64_C( 0x7FFFFFFFFFFFFFFF ) ) + & roundNearestEven ); + } + if ( sign && sig64 ) goto invalid; + if ( exact && sig0 ) softfloat_exceptionFlags |= softfloat_flag_inexact; + return sig64; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return UINT64_C( 0xFFFFFFFFFFFFFFFF ); + +} + diff --git a/softfloat/s_shift128ExtraRightJam.c b/softfloat/s_shift128ExtraRightJam.c new file mode 100755 index 0000000..6c57974 --- /dev/null +++ b/softfloat/s_shift128ExtraRightJam.c @@ -0,0 +1,38 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits, returning the
+| shifted pair in `z.v64':`z.v0' and the bits shifted out in `z.extra'.
+| Any set bit in the incoming `extra' word, or any bit shifted entirely
+| past `z.extra', is "jammed" into bit 0 of `z.extra' so that sticky
+| rounding information is never lost.
+| NOTE(review): with `count' == 0 the `negCount & 63' terms would leak
+| `a64'/`a0' into the wrong words; callers appear expected to pass a
+| nonzero `count' -- confirm against the call sites.
+*----------------------------------------------------------------------------*/
+struct uint128_extra
+ softfloat_shift128ExtraRightJam(
+ uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+ unsigned int negCount;
+ struct uint128_extra z;
+
+ negCount = - count;
+ if ( count < 64 ) {
+ /* Partial-word shift: low bits of `a64' carry into `v0', low bits
+    of `a0' spill into `extra'. */
+ z.v64 = a64>>count;
+ z.v0 = a64<<( negCount & 63 ) | a0>>count;
+ z.extra = a0<<( negCount & 63 );
+ } else {
+ z.v64 = 0;
+ if ( count == 64 ) {
+ /* Exactly one whole word: words move down unchanged. */
+ z.v0 = a64;
+ z.extra = a0;
+ } else {
+ /* More than one word: all of `a0' becomes sticky. */
+ extra |= a0;
+ if ( count < 128 ) {
+ z.v0 = a64>>( count & 63 );
+ z.extra = a64<<( negCount & 63 );
+ } else {
+ /* 128 or more: everything is shifted out; keep `a64' (or just
+    its nonzero-ness) as the sticky residue. */
+ z.v0 = 0;
+ z.extra = ( count == 128 ) ? a64 : ( a64 != 0 );
+ }
+ }
+ }
+ /* Jam the incoming sticky word into the result. */
+ z.extra |= ( extra != 0 );
+ return z;
+
+}
+
diff --git a/softfloat/s_shift128RightJam.c b/softfloat/s_shift128RightJam.c new file mode 100755 index 0000000..5a4e188 --- /dev/null +++ b/softfloat/s_shift128RightJam.c @@ -0,0 +1,31 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +struct uint128 + softfloat_shift128RightJam( uint64_t a64, uint64_t a0, unsigned int count ) +{ + unsigned int negCount; + struct uint128 z; + + if ( count < 64 ) { + negCount = - count; + z.v64 = a64>>( count & 63 ); + z.v0 = + a64<<( negCount & 63 ) | a0>>count + | ( (uint64_t) ( a0<<( negCount & 63 ) ) != 0 ); + } else { + z.v64 = 0; + z.v0 = + ( count < 128 ) + ? a64>>( count & 63 ) + | ( ( ( a64 & ( ( (uint64_t) 1<<( count & 63 ) ) - 1 ) ) + | a0 ) + != 0 ) + : ( ( a64 | a0 ) != 0 ); + } + return z; + +} + diff --git a/softfloat/s_shift32RightJam.c b/softfloat/s_shift32RightJam.c new file mode 100755 index 0000000..b697a34 --- /dev/null +++ b/softfloat/s_shift32RightJam.c @@ -0,0 +1,15 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +uint32_t softfloat_shift32RightJam( uint32_t a, unsigned int count ) +{ + + return + ( count < 32 ) + ? a>>count | ( (uint32_t) ( a<<( ( - count ) & 31 ) ) != 0 ) + : ( a != 0 ); + +} + diff --git a/softfloat/s_shift64ExtraRightJam.c b/softfloat/s_shift64ExtraRightJam.c new file mode 100755 index 0000000..167ea54 --- /dev/null +++ b/softfloat/s_shift64ExtraRightJam.c @@ -0,0 +1,23 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts `a' right by `count' bits, returning the shifted value in `.v' and
+| the bits shifted out in `.extra'.  Any set bit in the incoming `extra'
+| word, or any bit shifted entirely past `.extra', is jammed into bit 0 of
+| the outgoing `.extra' so sticky rounding information survives.
+| NOTE(review): a zero `count' would copy `a' into `.extra'; callers appear
+| expected to pass a nonzero `count' -- confirm against the call sites.
+*----------------------------------------------------------------------------*/
+struct uint64_extra
+ softfloat_shift64ExtraRightJam(
+ uint64_t a, uint64_t extra, unsigned int count )
+{
+ struct uint64_extra result;
+
+ if ( 64 <= count ) {
+ /* Whole value shifted out; keep it (or its nonzero-ness) as sticky. */
+ result.v = 0;
+ result.extra = ( count == 64 ) ? a : ( a != 0 );
+ } else {
+ result.v = a>>count;
+ result.extra = a<<( ( - count ) & 63 );
+ }
+ result.extra |= ( extra != 0 );
+ return result;
+}
+
diff --git a/softfloat/s_shift64RightJam.c b/softfloat/s_shift64RightJam.c new file mode 100755 index 0000000..ebebb61 --- /dev/null +++ b/softfloat/s_shift64RightJam.c @@ -0,0 +1,15 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +uint64_t softfloat_shift64RightJam( uint64_t a, unsigned int count ) +{ + + return + ( count < 64 ) + ? a>>count | ( (uint64_t) ( a<<( ( - count ) & 63 ) ) != 0 ) + : ( a != 0 ); + +} + diff --git a/softfloat/s_shortShift128ExtraRightJam.c b/softfloat/s_shortShift128ExtraRightJam.c new file mode 100755 index 0000000..c772740 --- /dev/null +++ b/softfloat/s_shortShift128ExtraRightJam.c @@ -0,0 +1,20 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits (expected range
+| 1 to 63), returning the shifted pair plus an `.extra' word holding the
+| bits pushed out of `a0'.  A nonzero incoming `extra' is jammed into bit 0
+| of the outgoing `.extra' so sticky information is preserved.
+*----------------------------------------------------------------------------*/
+struct uint128_extra
+ softfloat_shortShift128ExtraRightJam(
+ uint64_t a64, uint64_t a0, uint64_t extra, unsigned int count )
+{
+ struct uint128_extra shifted;
+ unsigned int backShift;
+
+ backShift = ( - count ) & 63;
+ shifted.v64 = a64>>count;
+ shifted.v0 = a0>>count | a64<<backShift;
+ shifted.extra = ( extra != 0 ) | a0<<backShift;
+ return shifted;
+}
+
diff --git a/softfloat/s_shortShift128Left.c b/softfloat/s_shortShift128Left.c new file mode 100755 index 0000000..9c29988 --- /dev/null +++ b/softfloat/s_shortShift128Left.c @@ -0,0 +1,16 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' left by `count' bits (expected range
+| 1 to 63).  Bits leaving the top of the low word carry into the bottom of
+| the high word; bits shifted past bit 127 are discarded.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_shortShift128Left( uint64_t a64, uint64_t a0, unsigned int count )
+{
+ unsigned int carryShift;
+ struct uint128 shifted;
+
+ carryShift = ( - count ) & 63;
+ shifted.v0 = a0<<count;
+ shifted.v64 = ( a64<<count ) | ( a0>>carryShift );
+ return shifted;
+}
+
diff --git a/softfloat/s_shortShift128Right.c b/softfloat/s_shortShift128Right.c new file mode 100755 index 0000000..f7f4ce8 --- /dev/null +++ b/softfloat/s_shortShift128Right.c @@ -0,0 +1,16 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 128-bit value `a64':`a0' right by `count' bits (expected range
+| 1 to 63).  Bits leaving the bottom of the high word carry into the top of
+| the low word; bits shifted below bit 0 are discarded.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_shortShift128Right( uint64_t a64, uint64_t a0, unsigned int count )
+{
+ unsigned int carryShift;
+ struct uint128 shifted;
+
+ carryShift = ( - count ) & 63;
+ shifted.v0 = a0>>count | a64<<carryShift;
+ shifted.v64 = a64>>count;
+ return shifted;
+}
+
diff --git a/softfloat/s_shortShift192Left.c b/softfloat/s_shortShift192Left.c new file mode 100755 index 0000000..cf1e55d --- /dev/null +++ b/softfloat/s_shortShift192Left.c @@ -0,0 +1,20 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts the 192-bit value `a128':`a64':`a0' left by `count' bits (expected
+| range 1 to 63).  Each word receives the carried-out high bits of the word
+| below it; bits shifted past bit 191 are discarded.
+*----------------------------------------------------------------------------*/
+struct uint192
+ softfloat_shortShift192Left(
+ uint64_t a128, uint64_t a64, uint64_t a0, unsigned int count )
+{
+ unsigned int carryShift;
+ struct uint192 shifted;
+
+ carryShift = ( - count ) & 63;
+ shifted.v0 = a0<<count;
+ shifted.v64 = ( a64<<count ) | ( a0>>carryShift );
+ shifted.v128 = ( a128<<count ) | ( a64>>carryShift );
+ return shifted;
+}
+
diff --git a/softfloat/s_shortShift32Right1Jam.c b/softfloat/s_shortShift32Right1Jam.c new file mode 100755 index 0000000..db4c304 --- /dev/null +++ b/softfloat/s_shortShift32Right1Jam.c @@ -0,0 +1,12 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +uint32_t softfloat_shortShift32Right1Jam( uint32_t a ) +{ + + return a>>1 | ( a & 1 ); + +} + diff --git a/softfloat/s_shortShift64ExtraRightJam.c b/softfloat/s_shortShift64ExtraRightJam.c new file mode 100755 index 0000000..b861c67 --- /dev/null +++ b/softfloat/s_shortShift64ExtraRightJam.c @@ -0,0 +1,17 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Shifts `a' right by `count' bits (expected range 1 to 63), returning the
+| shifted value in `.v' and the bits shifted out in `.extra'.  A nonzero
+| incoming `extra' is jammed into bit 0 of the outgoing `.extra' so sticky
+| rounding information is preserved.
+*----------------------------------------------------------------------------*/
+struct uint64_extra
+ softfloat_shortShift64ExtraRightJam(
+ uint64_t a, uint64_t extra, unsigned int count )
+{
+ struct uint64_extra shifted;
+
+ shifted.v = a>>count;
+ shifted.extra = ( extra != 0 ) | a<<( ( - count ) & 63 );
+ return shifted;
+}
+
diff --git a/softfloat/s_shortShift64RightJam.c b/softfloat/s_shortShift64RightJam.c new file mode 100755 index 0000000..0da6c93 --- /dev/null +++ b/softfloat/s_shortShift64RightJam.c @@ -0,0 +1,12 @@ + +#include <stdint.h> +#include "platform.h" +#include "primitives.h" + +uint64_t softfloat_shortShift64RightJam( uint64_t a, unsigned int count ) +{ + + return a>>count | ( ( a & ( ( (uint64_t) 1<<count ) - 1 ) ) != 0 ); + +} + diff --git a/softfloat/s_sub128.c b/softfloat/s_sub128.c new file mode 100755 index 0000000..0c4f181 --- /dev/null +++ b/softfloat/s_sub128.c @@ -0,0 +1,17 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Returns the 128-bit difference of `a64':`a0' and `b64':`b0', wrapping
+| modulo 2^128.  The borrow out of the low 64-bit subtraction is propagated
+| into the high word.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+ unsigned int borrow;
+ struct uint128 diff;
+
+ borrow = ( a0 < b0 );
+ diff.v0 = a0 - b0;
+ diff.v64 = a64 - b64 - borrow;
+ return diff;
+}
+
diff --git a/softfloat/s_sub192.c b/softfloat/s_sub192.c new file mode 100755 index 0000000..96f21c9 --- /dev/null +++ b/softfloat/s_sub192.c @@ -0,0 +1,30 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+/*----------------------------------------------------------------------------
+| Returns the 192-bit difference of `a128':`a64':`a0' and `b128':`b64':`b0',
+| wrapping modulo 2^192.  Borrows are propagated word by word: the borrow
+| out of the low word feeds the middle word, and both the middle word's own
+| borrow and any borrow caused by applying `borrow64' feed the high word.
+*----------------------------------------------------------------------------*/
+struct uint192
+ softfloat_sub192(
+ uint64_t a128,
+ uint64_t a64,
+ uint64_t a0,
+ uint64_t b128,
+ uint64_t b64,
+ uint64_t b0
+ )
+{
+ struct uint192 z;
+ unsigned int borrow64, borrow128;
+
+ z.v0 = a0 - b0;
+ borrow64 = ( a0 < b0 );
+ z.v64 = a64 - b64;
+ borrow128 = ( a64 < b64 );
+ z.v128 = a128 - b128;
+ /* Subtracting `borrow64' from `z.v64' can itself borrow; that must be
+    detected before `z.v64' is decremented below. */
+ borrow128 += ( z.v64 < borrow64 );
+ z.v64 -= borrow64;
+ z.v128 -= borrow128;
+ return z;
+
+}
+
diff --git a/softfloat/s_subMagsF32.c b/softfloat/s_subMagsF32.c new file mode 100755 index 0000000..0c83b02 --- /dev/null +++ b/softfloat/s_subMagsF32.c @@ -0,0 +1,81 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t + softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + union ui32_f32 uZ; + + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + expDiff = expA - expB; + sigA <<= 7; + sigB <<= 7; + if ( 0 < expDiff ) goto expABigger; + if ( expDiff < 0 ) goto expBBigger; + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + } + if ( ! expA ) { + expA = 1; + expB = 1; + } + if ( sigB < sigA ) goto aBigger; + if ( sigA < sigB ) goto bBigger; + uiZ = packToF32UI( softfloat_roundingMode == softfloat_round_min, 0, 0 ); + goto uiZ; + expBBigger: + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF32UI( signZ ^ 1, 0xFF, 0 ); + goto uiZ; + } + sigA += expA ? 0x40000000 : sigA; + sigA = softfloat_shift32RightJam( sigA, - expDiff ); + sigB |= 0x40000000; + bBigger: + signZ ^= 1; + expZ = expB; + sigZ = sigB - sigA; + goto normRoundPack; + expABigger: + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + sigB += expB ? 
0x40000000 : sigB; + sigB = softfloat_shift32RightJam( sigB, expDiff ); + sigA |= 0x40000000; + aBigger: + expZ = expA; + sigZ = sigA - sigB; + normRoundPack: + return softfloat_normRoundPackToF32( signZ, expZ - 1, sigZ ); + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/s_subMagsF64.c b/softfloat/s_subMagsF64.c new file mode 100755 index 0000000..45b81ba --- /dev/null +++ b/softfloat/s_subMagsF64.c @@ -0,0 +1,81 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "primitives.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t + softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + expDiff = expA - expB; + sigA <<= 10; + sigB <<= 10; + if ( 0 < expDiff ) goto expABigger; + if ( expDiff < 0 ) goto expBBigger; + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + } + if ( ! expA ) { + expA = 1; + expB = 1; + } + if ( sigB < sigA ) goto aBigger; + if ( sigA < sigB ) goto bBigger; + uiZ = packToF64UI( softfloat_roundingMode == softfloat_round_min, 0, 0 ); + goto uiZ; + expBBigger: + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ ^ 1, 0x7FF, 0 ); + goto uiZ; + } + sigA += expA ? 
UINT64_C( 0x4000000000000000 ) : sigA; + sigA = softfloat_shift64RightJam( sigA, - expDiff ); + sigB |= UINT64_C( 0x4000000000000000 ); + bBigger: + signZ ^= 1; + expZ = expB; + sigZ = sigB - sigA; + goto normRoundPack; + expABigger: + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + sigB += expB ? UINT64_C( 0x4000000000000000 ) : sigB; + sigB = softfloat_shift64RightJam( sigB, expDiff ); + sigA |= UINT64_C( 0x4000000000000000 ); + aBigger: + expZ = expA; + sigZ = sigA - sigB; + normRoundPack: + return softfloat_normRoundPackToF64( signZ, expZ - 1, sigZ ); + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/softfloat/softfloat.ac b/softfloat/softfloat.ac new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/softfloat/softfloat.ac diff --git a/softfloat/softfloat.h b/softfloat/softfloat.h new file mode 100755 index 0000000..3eddeed --- /dev/null +++ b/softfloat/softfloat.h @@ -0,0 +1,233 @@ +
+#ifndef softfloat_h
+#define softfloat_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*** UPDATE COMMENTS. ***/
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-point Arithmetic
+Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include "softfloat_types.h"
+
+/*----------------------------------------------------------------------------
+| Software floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_detectTininess;
+enum {
+ softfloat_tininess_beforeRounding = 0,
+ softfloat_tininess_afterRounding = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point rounding mode.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_roundingMode;
+enum {
+ softfloat_round_nearest_even = 0,
+ softfloat_round_minMag = 1,
+ softfloat_round_min = 2,
+ softfloat_round_max = 3,
+ softfloat_round_nearest_maxMag = 4
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t softfloat_exceptionFlags;
+enum {
+ softfloat_flag_inexact = 1,
+ softfloat_flag_underflow = 2,
+ softfloat_flag_overflow = 4,
+ softfloat_flag_infinity = 8,
+ softfloat_flag_invalid = 16
+};
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( int_fast8_t );
+
+/*----------------------------------------------------------------------------
+| Integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float32_t ui32_to_f32( uint_fast32_t );
+float64_t ui32_to_f64( uint_fast32_t );
+floatx80_t ui32_to_fx80( uint_fast32_t );
+float128_t ui32_to_f128( uint_fast32_t );
+float32_t ui64_to_f32( uint_fast64_t );
+float64_t ui64_to_f64( uint_fast64_t );
+floatx80_t ui64_to_fx80( uint_fast64_t );
+float128_t ui64_to_f128( uint_fast64_t );
+float32_t i32_to_f32( int_fast32_t );
+float64_t i32_to_f64( int_fast32_t );
+floatx80_t i32_to_fx80( int_fast32_t );
+float128_t i32_to_f128( int_fast32_t );
+float32_t i64_to_f32( int_fast64_t );
+float64_t i64_to_f64( int_fast64_t );
+floatx80_t i64_to_fx80( int_fast64_t );
+float128_t i64_to_f128( int_fast64_t );
+
+/*----------------------------------------------------------------------------
+| 32-bit (single-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32( float32_t, int_fast8_t, bool );
+uint_fast64_t f32_to_ui64( float32_t, int_fast8_t, bool );
+int_fast32_t f32_to_i32( float32_t, int_fast8_t, bool );
+int_fast64_t f32_to_i64( float32_t, int_fast8_t, bool );
+uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool );
+uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool );
+int_fast32_t f32_to_i32_r_minMag( float32_t, bool );
+int_fast64_t f32_to_i64_r_minMag( float32_t, bool );
+float64_t f32_to_f64( float32_t );
+floatx80_t f32_to_fx80( float32_t );
+float128_t f32_to_f128( float32_t );
+float32_t f32_roundToInt( float32_t, int_fast8_t, bool );
+float32_t f32_add( float32_t, float32_t );
+float32_t f32_sub( float32_t, float32_t );
+float32_t f32_mul( float32_t, float32_t );
+float32_t f32_mulAdd( float32_t, float32_t, float32_t );
+float32_t f32_div( float32_t, float32_t );
+float32_t f32_rem( float32_t, float32_t );
+float32_t f32_sqrt( float32_t );
+bool f32_eq( float32_t, float32_t );
+bool f32_le( float32_t, float32_t );
+bool f32_lt( float32_t, float32_t );
+bool f32_eq_signaling( float32_t, float32_t );
+bool f32_le_quiet( float32_t, float32_t );
+bool f32_lt_quiet( float32_t, float32_t );
+bool f32_isSignalingNaN( float32_t );
+
+/*----------------------------------------------------------------------------
+| 64-bit (double-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f64_to_ui32( float64_t, int_fast8_t, bool );
+uint_fast64_t f64_to_ui64( float64_t, int_fast8_t, bool );
+int_fast32_t f64_to_i32( float64_t, int_fast8_t, bool );
+int_fast64_t f64_to_i64( float64_t, int_fast8_t, bool );
+uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool );
+uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool );
+int_fast32_t f64_to_i32_r_minMag( float64_t, bool );
+int_fast64_t f64_to_i64_r_minMag( float64_t, bool );
+float32_t f64_to_f32( float64_t );
+floatx80_t f64_to_fx80( float64_t );
+float128_t f64_to_f128( float64_t );
+float64_t f64_roundToInt( float64_t, int_fast8_t, bool );
+float64_t f64_add( float64_t, float64_t );
+float64_t f64_sub( float64_t, float64_t );
+float64_t f64_mul( float64_t, float64_t );
+float64_t f64_mulAdd( float64_t, float64_t, float64_t );
+float64_t f64_div( float64_t, float64_t );
+float64_t f64_rem( float64_t, float64_t );
+float64_t f64_sqrt( float64_t );
+bool f64_eq( float64_t, float64_t );
+bool f64_le( float64_t, float64_t );
+bool f64_lt( float64_t, float64_t );
+bool f64_eq_signaling( float64_t, float64_t );
+bool f64_le_quiet( float64_t, float64_t );
+bool f64_lt_quiet( float64_t, float64_t );
+bool f64_isSignalingNaN( float64_t );
+
+/*----------------------------------------------------------------------------
+| Extended double-precision rounding precision. Valid values are 32, 64, and
+| 80.
+*----------------------------------------------------------------------------*/
+extern int_fast8_t floatx80_roundingPrecision;
+
+/*----------------------------------------------------------------------------
+| Extended double-precision floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t fx80_to_ui32( floatx80_t, int_fast8_t, bool );
+uint_fast64_t fx80_to_ui64( floatx80_t, int_fast8_t, bool );
+int_fast32_t fx80_to_i32( floatx80_t, int_fast8_t, bool );
+int_fast64_t fx80_to_i64( floatx80_t, int_fast8_t, bool );
+uint_fast32_t fx80_to_ui32_r_minMag( floatx80_t, bool );
+uint_fast64_t fx80_to_ui64_r_minMag( floatx80_t, bool );
+int_fast32_t fx80_to_i32_r_minMag( floatx80_t, bool );
+int_fast64_t fx80_to_i64_r_minMag( floatx80_t, bool );
+float32_t fx80_to_f32( floatx80_t );
+float64_t fx80_to_f64( floatx80_t );
+float128_t fx80_to_f128( floatx80_t );
+floatx80_t fx80_roundToInt( floatx80_t, int_fast8_t, bool );
+floatx80_t fx80_add( floatx80_t, floatx80_t );
+floatx80_t fx80_sub( floatx80_t, floatx80_t );
+floatx80_t fx80_mul( floatx80_t, floatx80_t );
+floatx80_t fx80_mulAdd( floatx80_t, floatx80_t, floatx80_t );
+floatx80_t fx80_div( floatx80_t, floatx80_t );
+floatx80_t fx80_rem( floatx80_t, floatx80_t );
+floatx80_t fx80_sqrt( floatx80_t );
+bool fx80_eq( floatx80_t, floatx80_t );
+bool fx80_le( floatx80_t, floatx80_t );
+bool fx80_lt( floatx80_t, floatx80_t );
+bool fx80_eq_signaling( floatx80_t, floatx80_t );
+bool fx80_le_quiet( floatx80_t, floatx80_t );
+bool fx80_lt_quiet( floatx80_t, floatx80_t );
+bool fx80_isSignalingNaN( floatx80_t );
+
+/*----------------------------------------------------------------------------
+| 128-bit (quadruple-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f128_to_ui32( float128_t, int_fast8_t, bool );
+uint_fast64_t f128_to_ui64( float128_t, int_fast8_t, bool );
+int_fast32_t f128_to_i32( float128_t, int_fast8_t, bool );
+int_fast64_t f128_to_i64( float128_t, int_fast8_t, bool );
+uint_fast32_t f128_to_ui32_r_minMag( float128_t, bool );
+uint_fast64_t f128_to_ui64_r_minMag( float128_t, bool );
+int_fast32_t f128_to_i32_r_minMag( float128_t, bool );
+int_fast64_t f128_to_i64_r_minMag( float128_t, bool );
+float32_t f128_to_f32( float128_t );
+float64_t f128_to_f64( float128_t );
+floatx80_t f128_to_fx80( float128_t );
+float128_t f128_roundToInt( float128_t, int_fast8_t, bool );
+float128_t f128_add( float128_t, float128_t );
+float128_t f128_sub( float128_t, float128_t );
+float128_t f128_mul( float128_t, float128_t );
+float128_t f128_mulAdd( float128_t, float128_t, float128_t );
+float128_t f128_div( float128_t, float128_t );
+float128_t f128_rem( float128_t, float128_t );
+float128_t f128_sqrt( float128_t );
+bool f128_eq( float128_t, float128_t );
+bool f128_le( float128_t, float128_t );
+bool f128_lt( float128_t, float128_t );
+bool f128_eq_signaling( float128_t, float128_t );
+bool f128_le_quiet( float128_t, float128_t );
+bool f128_lt_quiet( float128_t, float128_t );
+bool f128_isSignalingNaN( float128_t );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/softfloat/softfloat.mk.in b/softfloat/softfloat.mk.in new file mode 100644 index 0000000..59993cb --- /dev/null +++ b/softfloat/softfloat.mk.in @@ -0,0 +1,113 @@ +softfloat_subproject_deps = \ + sotfloat_riscv \ + +softfloat_hdrs = \ + internals.h \ + primitives.h \ + softfloat.h \ + +softfloat_c_srcs = \ + f32_add.c \ + f32_div.c \ + f32_eq.c \ + f32_eq_signaling.c \ + f32_isSignalingNaN.c \ + f32_le.c \ + f32_le_quiet.c \ + f32_lt.c \ + f32_lt_quiet.c \ + f32_mulAdd.c \ + f32_mul.c \ + f32_rem.c \ + f32_roundToInt.c \ + f32_sqrt.c \ + f32_sub.c \ + f32_to_f64.c \ + f32_to_i32.c \ + f32_to_i32_r_minMag.c \ + f32_to_i64.c \ + f32_to_i64_r_minMag.c \ + f32_to_ui32.c \ + f32_to_ui32_r_minMag.c \ + f32_to_ui64.c \ + f32_to_ui64_r_minMag.c \ + f64_add.c \ + f64_div.c \ + f64_eq.c \ + f64_eq_signaling.c \ + f64_isSignalingNaN.c \ + f64_le.c \ + f64_le_quiet.c \ + f64_lt.c \ + f64_lt_quiet.c \ + f64_mulAdd.c \ + f64_mul.c \ + f64_rem.c \ + f64_roundToInt.c \ + f64_sqrt.c \ + f64_sub.c \ + f64_to_f32.c \ + f64_to_i32.c \ + f64_to_i32_r_minMag.c \ + f64_to_i64.c \ + f64_to_i64_r_minMag.c \ + f64_to_ui32.c \ + f64_to_ui32_r_minMag.c \ + f64_to_ui64.c \ + f64_to_ui64_r_minMag.c \ + i32_to_f32.c \ + i32_to_f64.c \ + i64_to_f32.c \ + i64_to_f64.c \ + s_add128.c \ + s_add192.c \ + s_addMagsF32.c \ + s_addMagsF64.c \ + s_countLeadingZeros32.c \ + s_countLeadingZeros64.c \ + s_countLeadingZeros8.c \ + s_eq128.c \ + s_estimateDiv128To64.c \ + s_estimateSqrt32.c \ + s_le128.c \ + s_lt128.c \ + s_mul128By64To192.c \ + s_mul128To256.c \ + s_mul64To128.c \ + s_mulAddF32.c \ + s_mulAddF64.c \ + s_normRoundPackToF32.c \ + s_normRoundPackToF64.c \ + s_normSubnormalF32Sig.c \ + s_normSubnormalF64Sig.c \ + softfloat_state.c \ + s_roundPackToF32.c \ + s_roundPackToF64.c \ + s_roundPackToI32.c \ + s_roundPackToI64.c \ + s_roundPackToUI32.c \ + s_roundPackToUI64.c \ + s_shift128ExtraRightJam.c \ + s_shift128RightJam.c \ + s_shift32RightJam.c \ + s_shift64ExtraRightJam.c \ + 
s_shift64RightJam.c \ + s_shortShift128ExtraRightJam.c \ + s_shortShift128Left.c \ + s_shortShift128Right.c \ + s_shortShift192Left.c \ + s_shortShift32Right1Jam.c \ + s_shortShift64ExtraRightJam.c \ + s_shortShift64RightJam.c \ + s_sub128.c \ + s_sub192.c \ + s_subMagsF32.c \ + s_subMagsF64.c \ + ui32_to_f32.c \ + ui32_to_f64.c \ + ui64_to_f32.c \ + ui64_to_f64.c \ + +softfloat_test_srcs = + +softfloat_install_prog_srcs = diff --git a/softfloat/softfloat_state.c b/softfloat/softfloat_state.c new file mode 100755 index 0000000..8859089 --- /dev/null +++ b/softfloat/softfloat_state.c @@ -0,0 +1,19 @@ +
+/*** COMMENTS. ***/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Floating-point rounding mode, extended double-precision rounding precision,
+| and exception flags.  These globals form the mutable state shared by all
+| SoftFloat operations; they are not thread-safe.
+*----------------------------------------------------------------------------*/
+int_fast8_t softfloat_roundingMode = softfloat_round_nearest_even;
+/* `init_detectTininess' is supplied by the platform's specialize.h.  */
+int_fast8_t softfloat_detectTininess = init_detectTininess;
+/* Sticky exception-flag accumulator; bits are OR'd in by
+   softfloat_raiseFlags and must be cleared explicitly by the client.  */
+int_fast8_t softfloat_exceptionFlags = 0;
+
+/* Rounding precision for the extended double-precision (80-bit) format;
+   valid values are 32, 64, and 80 (see softfloat.h).  */
+int_fast8_t floatx80_roundingPrecision = 80;
+
diff --git a/softfloat/ui32_to_f32.c b/softfloat/ui32_to_f32.c new file mode 100755 index 0000000..ba0fc1a --- /dev/null +++ b/softfloat/ui32_to_f32.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 32-bit unsigned integer `a' to the single-precision floating-
+| point format, rounding according to the current rounding mode.  Values
+| needing more than 24 significant bits cannot be represented exactly, so
+| the result may be inexact.
+*----------------------------------------------------------------------------*/
+float32_t ui32_to_f32( uint_fast32_t a )
+{
+ union ui32_f32 uZ;
+
+ /* Zero converts exactly to +0.  */
+ if ( ! a ) {
+ uZ.ui = 0;
+ return uZ.f;
+ }
+ if ( a & 0x80000000 ) {
+ /* Bit 31 set:  shift right one place, jamming the shifted-out bit
+    into the LSB so rounding still sees it; exponent 0x9D compensates
+    for the one-bit shift relative to the 0x9C case below.  */
+ return
+ softfloat_roundPackToF32(
+ 0, 0x9D, softfloat_shortShift32Right1Jam( a ) );
+ } else {
+ return softfloat_normRoundPackToF32( 0, 0x9C, a );
+ }
+
+}
+
diff --git a/softfloat/ui32_to_f64.c b/softfloat/ui32_to_f64.c new file mode 100755 index 0000000..d0bd177 --- /dev/null +++ b/softfloat/ui32_to_f64.c @@ -0,0 +1,26 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 32-bit unsigned integer `a' to the double-precision floating-
+| point format.  Every 32-bit value fits in the 52-bit significand, so the
+| conversion is always exact and no rounding occurs.
+*----------------------------------------------------------------------------*/
+float64_t ui32_to_f64( uint_fast32_t a )
+{
+ uint_fast64_t uiZ;
+ int shiftCount;
+ union ui64_f64 uZ;
+
+ if ( ! a ) {
+ uiZ = 0;
+ } else {
+ /* Normalize:  shift `a' left so its leading 1 lands at bit 52
+    (the +21 is 53 - 32); that bit overlaps the exponent field's LSB
+    inside packToF64UI, supplying the implicit-bit contribution, and
+    the exponent 0x432 - shiftCount undoes the shift.  */
+ shiftCount = softfloat_countLeadingZeros32( a ) + 21;
+ uiZ =
+ packToF64UI(
+ 0, 0x432 - shiftCount, (uint_fast64_t) a<<shiftCount );
+ }
+ uZ.ui = uiZ;
+ return uZ.f;
+
+}
+
diff --git a/softfloat/ui64_to_f32.c b/softfloat/ui64_to_f32.c new file mode 100755 index 0000000..82afbdc --- /dev/null +++ b/softfloat/ui64_to_f32.c @@ -0,0 +1,31 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 64-bit unsigned integer `a' to the single-precision floating-
+| point format, rounding according to the current rounding mode.
+*----------------------------------------------------------------------------*/
+float32_t ui64_to_f32( uint_fast64_t a )
+{
+ int shiftCount;
+ union ui32_f32 u;
+ uint_fast32_t sig;
+
+ /* shiftCount >= 0 means `a' has at most 24 significant bits (clz >= 40)
+    and therefore converts exactly; pack it directly, mapping 0 to +0.  */
+ shiftCount = softfloat_countLeadingZeros64( a ) - 40;
+ if ( 0 <= shiftCount ) {
+ u.ui =
+ a ? packToF32UI(
+ 0, 0x95 - shiftCount, (uint_fast32_t) a<<shiftCount )
+ : 0;
+ return u.f;
+ } else {
+ /* Too many significant bits to be exact:  align the value to a
+    31-bit significand-plus-guard-bits position, right-shifting with
+    jamming when it is still too wide, then round and pack.  */
+ shiftCount += 7;
+ sig =
+ ( shiftCount < 0 )
+ ? softfloat_shortShift64RightJam( a, - shiftCount )
+ : (uint_fast32_t) a<<shiftCount;
+ return softfloat_roundPackToF32( 0, 0x9C - shiftCount, sig );
+ }
+
+}
+
diff --git a/softfloat/ui64_to_f64.c b/softfloat/ui64_to_f64.c new file mode 100755 index 0000000..52c158b --- /dev/null +++ b/softfloat/ui64_to_f64.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+#include "internals.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Converts the 64-bit unsigned integer `a' to the double-precision floating-
+| point format, rounding according to the current rounding mode.  Values
+| needing more than 53 significant bits may round inexactly.
+*----------------------------------------------------------------------------*/
+float64_t ui64_to_f64( uint_fast64_t a )
+{
+ union ui64_f64 uZ;
+
+ /* Zero converts exactly to +0.  */
+ if ( ! a ) {
+ uZ.ui = 0;
+ return uZ.f;
+ }
+ if ( a & UINT64_C( 0x8000000000000000 ) ) {
+ /* Bit 63 set:  shift right one place, jamming the shifted-out bit
+    so rounding still sees it; exponent 0x43D compensates for the
+    one-bit shift relative to the 0x43C case below.  */
+ return
+ softfloat_roundPackToF64(
+ 0, 0x43D, softfloat_shortShift64RightJam( a, 1 ) );
+ } else {
+ return softfloat_normRoundPackToF64( 0, 0x43C, a );
+ }
+
+}
+
diff --git a/softfloat_riscv b/softfloat_riscv deleted file mode 120000 index c6c4193..0000000 --- a/softfloat_riscv +++ /dev/null @@ -1 +0,0 @@ -../sim/softfloat_riscv/
\ No newline at end of file diff --git a/softfloat_riscv/platform.h b/softfloat_riscv/platform.h new file mode 100755 index 0000000..6c54313 --- /dev/null +++ b/softfloat_riscv/platform.h @@ -0,0 +1,42 @@ + +/*============================================================================ + +*** FIX. + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. 
+ +=============================================================================*/ + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define LITTLEENDIAN + +#ifndef UINT64_C +# define UINT64_C(x) (x ## ULL) +# define INT64_C(x) (x ## LL) +#endif diff --git a/softfloat_riscv/s_commonNaNToF32UI.c b/softfloat_riscv/s_commonNaNToF32UI.c new file mode 100755 index 0000000..61f2735 --- /dev/null +++ b/softfloat_riscv/s_commonNaNToF32UI.c @@ -0,0 +1,17 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the canonical NaN `a' to the single-
+| precision floating-point format.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+{
+
+ /* RISC-V specialization:  the payload carried in `a' is ignored; only
+    the sign bit is kept, with all exponent and fraction bits set, so
+    the result is always a quiet NaN with a fixed significand.  */
+ return (uint_fast32_t) a.sign<<31 | 0x7FFFFFFF;
+
+}
+
diff --git a/softfloat_riscv/s_commonNaNToF64UI.c b/softfloat_riscv/s_commonNaNToF64UI.c new file mode 100755 index 0000000..da36c04 --- /dev/null +++ b/softfloat_riscv/s_commonNaNToF64UI.c @@ -0,0 +1,18 @@ + +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the double- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN a ) +{ + + return + (uint_fast64_t) a.sign<<63 | UINT64_C( 0x7FFFFFFFFFFFFFFF ); + +} + diff --git a/softfloat_riscv/s_f32UIToCommonNaN.c b/softfloat_riscv/s_f32UIToCommonNaN.c new file mode 100755 index 0000000..9ee0db9 --- /dev/null +++ b/softfloat_riscv/s_f32UIToCommonNaN.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t uiA )
+{
+ struct commonNaN z;
+
+ /* A signaling NaN input raises the invalid exception but is otherwise
+    converted the same way as a quiet NaN.  */
+ if ( softfloat_isSigNaNF32UI( uiA ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ z.sign = uiA>>31;
+ /* The input's NaN payload is deliberately discarded:  `v64' is set to
+    a fixed all-ones 19-bit pattern at bits 41..59 (canonical payload
+    for this specialization), independent of `uiA's fraction bits.  */
+ z.v64 = (uint_fast64_t) 0x7FFFF <<41;
+ z.v0 = 0;
+ return z;
+
+}
+
diff --git a/softfloat_riscv/s_f64UIToCommonNaN.c b/softfloat_riscv/s_f64UIToCommonNaN.c new file mode 100755 index 0000000..84d8ca0 --- /dev/null +++ b/softfloat_riscv/s_f64UIToCommonNaN.c @@ -0,0 +1,25 @@ +
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the double-precision floating-point NaN
+| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t uiA )
+{
+ struct commonNaN z;
+
+ /* A signaling NaN input raises the invalid exception but is otherwise
+    converted the same way as a quiet NaN.  */
+ if ( softfloat_isSigNaNF64UI( uiA ) ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ z.sign = uiA>>63;
+ /* The input's NaN payload is deliberately discarded:  `v64' is set to
+    a fixed all-ones 52-bit pattern at bits 12..63 (canonical payload
+    for this specialization), independent of `uiA's fraction bits.  */
+ z.v64 = (uint_fast64_t) 0xFFFFFFFFFFFFF <<12;
+ z.v0 = 0;
+ return z;
+
+}
+
diff --git a/softfloat_riscv/s_isSigNaNF32UI.c b/softfloat_riscv/s_isSigNaNF32UI.c new file mode 100755 index 0000000..0a9c33f --- /dev/null +++ b/softfloat_riscv/s_isSigNaNF32UI.c @@ -0,0 +1,13 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +bool softfloat_isSigNaNF32UI( uint_fast32_t ui ) +{ + + return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); + +} + diff --git a/softfloat_riscv/s_isSigNaNF64UI.c b/softfloat_riscv/s_isSigNaNF64UI.c new file mode 100755 index 0000000..d255213 --- /dev/null +++ b/softfloat_riscv/s_isSigNaNF64UI.c @@ -0,0 +1,15 @@ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +bool softfloat_isSigNaNF64UI( uint_fast64_t ui ) +{ + + return + ( ( ui>>51 & 0xFFF ) == 0xFFE ) + && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) ); + +} + diff --git a/softfloat_riscv/s_propagateNaNF32UI.c b/softfloat_riscv/s_propagateNaNF32UI.c new file mode 100755 index 0000000..07774e8 --- /dev/null +++ b/softfloat_riscv/s_propagateNaNF32UI.c @@ -0,0 +1,55 @@ +
+/*** UPDATE COMMENTS. ***/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+ bool isNaNA, isSigNaNA, isNaNB, isSigNaNB;
+ uint_fast32_t uiMagA, uiMagB;
+
+ /*------------------------------------------------------------------------
+ | Classify both operands before they are quieted below.
+ *------------------------------------------------------------------------*/
+ isNaNA = isNaNF32UI( uiA );
+ isSigNaNA = softfloat_isSigNaNF32UI( uiA );
+ isNaNB = isNaNF32UI( uiB );
+ isSigNaNB = softfloat_isSigNaNF32UI( uiB );
+ /*------------------------------------------------------------------------
+ | Make NaNs non-signaling by setting the quiet bit (fraction bit 22).
+ *------------------------------------------------------------------------*/
+ uiA |= 0x00400000;
+ uiB |= 0x00400000;
+ /*------------------------------------------------------------------------
+ | Any signaling-NaN input raises the invalid exception.  The result is
+ | then selected by preference:  a quiet-NaN input wins over a signaling
+ | one; between two NaNs of the same kind, the one with the larger
+ | significand magnitude wins, ties broken by the smaller bit pattern.
+ *------------------------------------------------------------------------*/
+ if ( isSigNaNA | isSigNaNB ) {
+ softfloat_raiseFlags( softfloat_flag_invalid );
+ }
+ if ( isSigNaNA ) {
+ if ( isSigNaNB ) goto returnLargerSignificand;
+ return isNaNB ? uiB : uiA;
+ } else if ( isNaNA ) {
+ if ( isSigNaNB || ! isNaNB ) return uiA;
+ returnLargerSignificand:
+ /* Shifting left by one drops the sign bit, leaving exponent and
+    fraction for the magnitude comparison.  */
+ uiMagA = uiA<<1;
+ uiMagB = uiB<<1;
+ if ( uiMagA < uiMagB ) return uiB;
+ if ( uiMagB < uiMagA ) return uiA;
+ return ( uiA < uiB ) ? uiA : uiB;
+ } else {
+ return uiB;
+ }
+
+}
+
diff --git a/softfloat_riscv/s_propagateNaNF64UI.c b/softfloat_riscv/s_propagateNaNF64UI.c new file mode 100755 index 0000000..0ff6446 --- /dev/null +++ b/softfloat_riscv/s_propagateNaNF64UI.c @@ -0,0 +1,55 @@ + +/*** UPDATE COMMENTS. ***/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Takes two double-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ) +{ + bool isNaNA, isSigNaNA, isNaNB, isSigNaNB; + uint_fast64_t uiMagA, uiMagB; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + isNaNA = isNaNF64UI( uiA ); + isSigNaNA = softfloat_isSigNaNF64UI( uiA ); + isNaNB = isNaNF64UI( uiB ); + isSigNaNB = softfloat_isSigNaNF64UI( uiB ); + /*------------------------------------------------------------------------ + | Make NaNs non-signaling. + *------------------------------------------------------------------------*/ + uiA |= UINT64_C( 0x0008000000000000 ); + uiB |= UINT64_C( 0x0008000000000000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( isSigNaNA | isSigNaNB ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + if ( isSigNaNA ) { + if ( isSigNaNB ) goto returnLargerSignificand; + return isNaNB ? uiB : uiA; + } else if ( isNaNA ) { + if ( isSigNaNB || ! 
isNaNB ) return uiA; + returnLargerSignificand: + uiMagA = uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF ); + uiMagB = uiB & UINT64_C( 0x7FFFFFFFFFFFFFFF ); + if ( uiMagA < uiMagB ) return uiB; + if ( uiMagB < uiMagA ) return uiA; + return ( uiA < uiB ) ? uiA : uiB; + } else { + return uiB; + } + +} + diff --git a/softfloat_riscv/softfloat_raiseFlags.c b/softfloat_riscv/softfloat_raiseFlags.c new file mode 100755 index 0000000..c0c0dc8 --- /dev/null +++ b/softfloat_riscv/softfloat_raiseFlags.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +*** FIX. + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. 
+ +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +#include "platform.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `float_exception_flags |= flags;'. +*----------------------------------------------------------------------------*/ + +void softfloat_raiseFlags( int_fast8_t flags ) +{ + + softfloat_exceptionFlags |= flags; + +} + diff --git a/softfloat_riscv/softfloat_riscv.ac b/softfloat_riscv/softfloat_riscv.ac new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/softfloat_riscv/softfloat_riscv.ac diff --git a/softfloat_riscv/softfloat_riscv.mk.in b/softfloat_riscv/softfloat_riscv.mk.in new file mode 100644 index 0000000..0b898ed --- /dev/null +++ b/softfloat_riscv/softfloat_riscv.mk.in @@ -0,0 +1,21 @@ +softfloat_riscv_subproject_deps = \ + +softfloat_riscv_hdrs = \ + softfloat_types.h \ + platform.h \ + specialize.h \ + +softfloat_riscv_c_srcs = \ + softfloat_raiseFlags.c \ + s_commonNaNToF32UI.c \ + s_commonNaNToF64UI.c \ + s_f32UIToCommonNaN.c \ + s_f64UIToCommonNaN.c \ + s_isSigNaNF32UI.c \ + s_isSigNaNF64UI.c \ + s_propagateNaNF32UI.c \ + s_propagateNaNF64UI.c \ + +softfloat_riscv_test_srcs = + +softfloat_riscv_install_prog_srcs = diff --git a/softfloat_riscv/softfloat_types.h b/softfloat_riscv/softfloat_types.h new file mode 100755 index 0000000..9fada89 --- /dev/null +++ b/softfloat_riscv/softfloat_types.h @@ 
-0,0 +1,16 @@ + +#ifndef softfloat_types_h +#define softfloat_types_h + +/*** COMMENTS. ***/ + +#include <stdbool.h> +#include <stdint.h> + +typedef uint32_t float32_t; +typedef uint64_t float64_t; +typedef struct { uint64_t v; uint16_t x; } floatx80_t; +typedef struct { uint64_t v[ 2 ]; } float128_t; + +#endif + diff --git a/softfloat_riscv/specialize.h b/softfloat_riscv/specialize.h new file mode 100755 index 0000000..bf57bc9 --- /dev/null +++ b/softfloat_riscv/specialize.h @@ -0,0 +1,113 @@ +
+/*============================================================================
+
+*** FIX.
+
+This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+| Default tininess-detection mode, used to initialize
+| `softfloat_detectTininess' (see softfloat_state.c).
+| NOTE(review):  The trailing semicolon has been removed.  An object-like
+| macro must not end in `;':  every expansion site already supplies its own
+| terminator, so `... = init_detectTininess;' previously expanded to a
+| doubled `;;', leaving a stray file-scope semicolon (invalid in strict
+| ISO C) and breaking any parenthesized use of the macro.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_beforeRounding
+
+/*----------------------------------------------------------------------------
+| Structure used to transfer NaN representations from one format to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+ bool sign;
+ uint64_t v64, v0;
+};
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFFFFFFF
+
+/*----------------------------------------------------------------------------
+| Returns 1 if the single-precision floating-point value `a' is a signaling
+| NaN; otherwise, returns 0.
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF32UI( uint_fast32_t ui )
+ { return ( ( ui>>22 & 0x1FF ) == 0x1FE ) && ( ui & 0x003FFFFF ); }
+#else
+bool softfloat_isSigNaNF32UI( uint_fast32_t );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+struct commonNaN softfloat_f32UIToCommonNaN( uint_fast32_t );
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN a )
+ { return (uint_fast32_t) a.sign<<31 | 0x7FFFFFFF; }
+#else
+uint_fast32_t softfloat_commonNaNToF32UI( struct commonNaN );
+#endif
+
+/*----------------------------------------------------------------------------
+| Takes two single-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_propagateNaNF32UI( uint_fast32_t, uint_fast32_t );
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C(0xFFF8000000000000)
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#if defined INLINE_LEVEL && ( 1 <= INLINE_LEVEL )
+INLINE bool softfloat_isSigNaNF64UI( uint_fast64_t ui )
+{
+ return
+ ( ( ui>>51 & 0xFFF ) == 0xFFE )
+ && ( ui & UINT64_C( 0x0007FFFFFFFFFFFF ) );
+}
+#else
+bool softfloat_isSigNaNF64UI( uint_fast64_t );
+#endif
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+/*** MIGHT BE INLINE'D. ***/
+struct commonNaN softfloat_f64UIToCommonNaN( uint_fast64_t );
+uint_fast64_t softfloat_commonNaNToF64UI( struct commonNaN );
+
+/*----------------------------------------------------------------------------
+| Takes two double-precision floating-point values `a' and `b', one of which
+| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_propagateNaNF64UI( uint_fast64_t, uint_fast64_t );
+
|