author     Jim Wilson <jim.wilson@linaro.org>   2017-04-08 07:10:38 -0700
committer  Jim Wilson <jim.wilson@linaro.org>   2017-04-08 07:10:38 -0700
commit     ae27d3fe76ffb54e7d413a67d8c8d76ca78a9681 (patch)
tree       83574fac96777dad9d81878088f5432ab76616cc /sim
parent     aebcde5eb475befba571ca9ae7b6c58126d41160 (diff)
Support the fcmXX zero instructions.
sim/aarch64/
	* simulator.c (do_scalar_FCMGE_zero): New.
	(do_scalar_FCMLE_zero, do_scalar_FCMGT_zero, do_scalar_FCMEQ_zero)
	(do_scalar_FCMLT_zero): Likewise.
	(do_scalar_vec): Add calls to new functions.

sim/testsuite/sim/aarch64/
	* fcmXX.s: New.
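
Note: each scalar FCMxx-against-zero form compares one floating-point element with 0.0 and writes an all-ones mask to the destination element if the comparison holds, all-zeros otherwise. A minimal standalone C sketch of that mask behaviour, mirroring the style of the new handlers (not part of the patch; the function name is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Model of a scalar FCMGE-against-zero on a single-precision element:
   all-ones when the element is >= 0.0, all-zeros otherwise (a NaN
   operand compares false, so it also yields all-zeros).  */
static uint32_t
fcmge_zero_s (float x)
{
  return x >= 0.0f ? -1 : 0;   /* -1 converts to 0xffffffff.  */
}

int
main (void)
{
  printf ("%#" PRIx32 "\n", fcmge_zero_s (1.0f));   /* prints 0xffffffff */
  printf ("%#" PRIx32 "\n", fcmge_zero_s (-1.0f));  /* prints 0 */
  return 0;
}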
Diffstat (limited to 'sim')
-rw-r--r--  sim/aarch64/ChangeLog                |   6
-rw-r--r--  sim/aarch64/simulator.c              | 145
-rw-r--r--  sim/testsuite/sim/aarch64/ChangeLog  |   4
-rw-r--r--  sim/testsuite/sim/aarch64/fcmXX.s    |  77
4 files changed, 232 insertions, 0 deletions
diff --git a/sim/aarch64/ChangeLog b/sim/aarch64/ChangeLog
index 1d97291..0b3d21d 100644
--- a/sim/aarch64/ChangeLog
+++ b/sim/aarch64/ChangeLog
@@ -1,3 +1,9 @@
+2017-04-08  Jim Wilson  <jim.wilson@linaro.org>
+
+	* simulator.c (do_scalar_FCMGE_zero, do_scalar_FCMLE_zero,
+	do_scalar_FCMGT_zero, do_scalar_FCMEQ_zero, do_scalar_FCMLT_zero): New.
+	(do_scalar_vec): Add calls to new functions.
+
 2017-03-25  Jim Wilson  <jim.wilson@linaro.org>
 
 	* simulator.c (set_flags_for_add32): Cast result to uint32_t in carry
diff --git a/sim/aarch64/simulator.c b/sim/aarch64/simulator.c
index f0668ad..c2e02b1 100644
--- a/sim/aarch64/simulator.c
+++ b/sim/aarch64/simulator.c
@@ -8926,6 +8926,146 @@ do_scalar_SSHL (sim_cpu *cpu)
                          aarch64_get_vec_s64 (cpu, rn, 0) >> - shift);
 }
 
+/* Floating point scalar compare greater than or equal to 0.  */
+static void
+do_scalar_FCMGE_zero (sim_cpu *cpu)
+{
+  /* instr [31,23] = 0111 1110 1
+     instr [22,22] = size
+     instr [21,16] = 1000 00
+     instr [15,10] = 1100 10
+     instr [9, 5]  = Rn
+     instr [4, 0]  = Rd.  */
+
+  unsigned size = INSTR (22, 22);
+  unsigned rn = INSTR (9, 5);
+  unsigned rd = INSTR (4, 0);
+
+  NYI_assert (31, 23, 0x0FD);
+  NYI_assert (21, 16, 0x20);
+  NYI_assert (15, 10, 0x32);
+
+  TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+  if (size)
+    aarch64_set_vec_u64 (cpu, rd, 0,
+                         aarch64_get_vec_double (cpu, rn, 0) >= 0.0 ? -1 : 0);
+  else
+    aarch64_set_vec_u32 (cpu, rd, 0,
+                         aarch64_get_vec_float (cpu, rn, 0) >= 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare less than or equal to 0.  */
+static void
+do_scalar_FCMLE_zero (sim_cpu *cpu)
+{
+  /* instr [31,23] = 0111 1110 1
+     instr [22,22] = size
+     instr [21,16] = 1000 00
+     instr [15,10] = 1101 10
+     instr [9, 5]  = Rn
+     instr [4, 0]  = Rd.  */
+
+  unsigned size = INSTR (22, 22);
+  unsigned rn = INSTR (9, 5);
+  unsigned rd = INSTR (4, 0);
+
+  NYI_assert (31, 23, 0x0FD);
+  NYI_assert (21, 16, 0x20);
+  NYI_assert (15, 10, 0x36);
+
+  TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+  if (size)
+    aarch64_set_vec_u64 (cpu, rd, 0,
+                         aarch64_get_vec_double (cpu, rn, 0) <= 0.0 ? -1 : 0);
+  else
+    aarch64_set_vec_u32 (cpu, rd, 0,
+                         aarch64_get_vec_float (cpu, rn, 0) <= 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare greater than 0.  */
+static void
+do_scalar_FCMGT_zero (sim_cpu *cpu)
+{
+  /* instr [31,23] = 0101 1110 1
+     instr [22,22] = size
+     instr [21,16] = 1000 00
+     instr [15,10] = 1100 10
+     instr [9, 5]  = Rn
+     instr [4, 0]  = Rd.  */
+
+  unsigned size = INSTR (22, 22);
+  unsigned rn = INSTR (9, 5);
+  unsigned rd = INSTR (4, 0);
+
+  NYI_assert (31, 23, 0x0BD);
+  NYI_assert (21, 16, 0x20);
+  NYI_assert (15, 10, 0x32);
+
+  TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+  if (size)
+    aarch64_set_vec_u64 (cpu, rd, 0,
+                         aarch64_get_vec_double (cpu, rn, 0) > 0.0 ? -1 : 0);
+  else
+    aarch64_set_vec_u32 (cpu, rd, 0,
+                         aarch64_get_vec_float (cpu, rn, 0) > 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare equal to 0.  */
+static void
+do_scalar_FCMEQ_zero (sim_cpu *cpu)
+{
+  /* instr [31,23] = 0101 1110 1
+     instr [22,22] = size
+     instr [21,16] = 1000 00
+     instr [15,10] = 1101 10
+     instr [9, 5]  = Rn
+     instr [4, 0]  = Rd.  */
+
+  unsigned size = INSTR (22, 22);
+  unsigned rn = INSTR (9, 5);
+  unsigned rd = INSTR (4, 0);
+
+  NYI_assert (31, 23, 0x0BD);
+  NYI_assert (21, 16, 0x20);
+  NYI_assert (15, 10, 0x36);
+
+  TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+  if (size)
+    aarch64_set_vec_u64 (cpu, rd, 0,
+                         aarch64_get_vec_double (cpu, rn, 0) == 0.0 ? -1 : 0);
+  else
+    aarch64_set_vec_u32 (cpu, rd, 0,
+                         aarch64_get_vec_float (cpu, rn, 0) == 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare less than 0.  */
+static void
+do_scalar_FCMLT_zero (sim_cpu *cpu)
+{
+  /* instr [31,23] = 0101 1110 1
+     instr [22,22] = size
+     instr [21,16] = 1000 00
+     instr [15,10] = 1110 10
+     instr [9, 5]  = Rn
+     instr [4, 0]  = Rd.  */
+
+  unsigned size = INSTR (22, 22);
+  unsigned rn = INSTR (9, 5);
+  unsigned rd = INSTR (4, 0);
+
+  NYI_assert (31, 23, 0x0BD);
+  NYI_assert (21, 16, 0x20);
+  NYI_assert (15, 10, 0x3A);
+
+  TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+  if (size)
+    aarch64_set_vec_u64 (cpu, rd, 0,
+                         aarch64_get_vec_double (cpu, rn, 0) < 0.0 ? -1 : 0);
+  else
+    aarch64_set_vec_u32 (cpu, rd, 0,
+                         aarch64_get_vec_float (cpu, rn, 0) < 0.0 ? -1 : 0);
+}
+
 static void
 do_scalar_shift (sim_cpu *cpu)
 {
@@ -9249,7 +9389,9 @@ do_scalar_vec (sim_cpu *cpu)
         case 0x0D: do_scalar_CMGT (cpu); return;
         case 0x11: do_scalar_USHL (cpu); return;
         case 0x2E: do_scalar_NEG (cpu); return;
+        case 0x32: do_scalar_FCMGE_zero (cpu); return;
         case 0x35: do_scalar_FABD (cpu); return;
+        case 0x36: do_scalar_FCMLE_zero (cpu); return;
         case 0x39: do_scalar_FCM (cpu); return;
         case 0x3B: do_scalar_FCM (cpu); return;
         default:
@@ -9263,6 +9405,9 @@ do_scalar_vec (sim_cpu *cpu)
         {
         case 0x21: do_double_add (cpu); return;
         case 0x11: do_scalar_SSHL (cpu); return;
+        case 0x32: do_scalar_FCMGT_zero (cpu); return;
+        case 0x36: do_scalar_FCMEQ_zero (cpu); return;
+        case 0x3A: do_scalar_FCMLT_zero (cpu); return;
         default:
           HALT_NYI;
         }
diff --git a/sim/testsuite/sim/aarch64/ChangeLog b/sim/testsuite/sim/aarch64/ChangeLog
index 0941446..5aaa67f 100644
--- a/sim/testsuite/sim/aarch64/ChangeLog
+++ b/sim/testsuite/sim/aarch64/ChangeLog
@@ -1,3 +1,7 @@
+2017-04-08  Jim Wilson  <jim.wilson@linaro.org>
+
+	* fcmXX.s: New.
+
 2017-03-25  Jim Wilson  <jim.wilson@linaro.org>
 
 	* adds.s: Add checks for values -2 and 1, where C is not set.
diff --git a/sim/testsuite/sim/aarch64/fcmXX.s b/sim/testsuite/sim/aarch64/fcmXX.s
new file mode 100644
index 0000000..cc1a2a9
--- /dev/null
+++ b/sim/testsuite/sim/aarch64/fcmXX.s
@@ -0,0 +1,77 @@
+# mach: aarch64
+
+# Check the FP scalar compare zero instructions: fcmeq, fcmle, fcmlt, fcmge,
+# fcmgt.
+# Check values -1, 0, and 1.
+
+.include "testutils.inc"
+
+	start
+	fmov s0, wzr
+	fcmeq s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #-1
+	bne .Lfailure
+	fmov s0, #-1.0
+	fcmeq s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #0
+	bne .Lfailure
+	fmov d0, xzr
+	fcmeq d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #-1
+	bne .Lfailure
+	fmov d0, #1.0
+	fcmeq d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #0
+	bne .Lfailure
+
+	fmov s0, #-1.0
+	fcmle s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #-1
+	bne .Lfailure
+	fmov d0, #-1.0
+	fcmle d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #-1
+	bne .Lfailure
+
+	fmov s0, #-1.0
+	fcmlt s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #-1
+	bne .Lfailure
+	fmov d0, #-1.0
+	fcmlt d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #-1
+	bne .Lfailure
+
+	fmov s0, #1.0
+	fcmge s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #-1
+	bne .Lfailure
+	fmov d0, #1.0
+	fcmge d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #-1
+	bne .Lfailure
+
+	fmov s0, #1.0
+	fcmgt s1, s0, #0.0
+	mov w0, v1.s[0]
+	cmp w0, #-1
+	bne .Lfailure
+	fmov d0, #1.0
+	fcmgt d1, d0, #0.0
+	mov x0, v1.d[0]
+	cmp x0, #-1
+	bne .Lfailure
+
+	pass
+.Lfailure:
+	fail
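
A behavioural detail worth noting, and not exercised by the new fcmXX.s test: the AArch64 compare-against-zero instructions produce an all-zeros result when the operand is a NaN, and the C relational operators used in the new handlers are likewise false for NaN, so no explicit unordered check is needed. A small standalone check of that assumption (not part of the patch):

#include <assert.h>
#include <math.h>
#include <stdint.h>

int
main (void)
{
  float n = NAN;
  /* Every relational comparison against 0.0 is false when the operand
     is NaN, so each simulated fcmXX-zero mask comes out as 0.  */
  assert ((uint32_t) (n >= 0.0f ? -1 : 0) == 0);
  assert ((uint32_t) (n >  0.0f ? -1 : 0) == 0);
  assert ((uint32_t) (n <= 0.0f ? -1 : 0) == 0);
  assert ((uint32_t) (n <  0.0f ? -1 : 0) == 0);
  assert ((uint32_t) (n == 0.0f ? -1 : 0) == 0);
  return 0;
}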