author     Jim Wilson <jim.wilson@linaro.org>   2017-04-08 07:10:38 -0700
committer  Jim Wilson <jim.wilson@linaro.org>   2017-04-08 07:10:38 -0700
commit     ae27d3fe76ffb54e7d413a67d8c8d76ca78a9681 (patch)
tree       83574fac96777dad9d81878088f5432ab76616cc /sim/aarch64
parent     aebcde5eb475befba571ca9ae7b6c58126d41160 (diff)
Support the fcmXX zero instructions.
sim/aarch64/
	* simulator.c (do_scalar_FCMGE_zero): New.
	(do_scalar_FCMLE_zero, do_scalar_FCMGT_zero, do_scalar_FCMEQ_zero)
	(do_scalar_FCMLT_zero): Likewise.
	(do_scalar_vec): Add calls to new functions.

sim/testsuite/sim/aarch64/
	* fcmXX.s: New.
Diffstat (limited to 'sim/aarch64')
 -rw-r--r--  sim/aarch64/ChangeLog   |   6
 -rw-r--r--  sim/aarch64/simulator.c | 145
 2 files changed, 151 insertions, 0 deletions
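All five new handlers share one pattern: read a single scalar floating-point lane, compare it against zero, and write back an all-ones mask (-1) when the comparison holds, or zero when it does not. The stand-alone C sketch below is not part of the patch; it is a minimal host-side illustration of that semantics for the double-precision FCMGE-zero case, with the helper name fcmge_zero_d chosen purely for this example.

#include <stdint.h>
#include <stdio.h>

/* Host-side reference for the double-precision FCMGE-zero result:
   all-ones when the operand is >= 0.0, all-zeros otherwise (a NaN
   operand compares false, giving zero, as in the simulator code).  */
static uint64_t
fcmge_zero_d (double x)
{
  return x >= 0.0 ? (uint64_t) -1 : 0;
}

int
main (void)
{
  /* Expected output: ffffffffffffffff then 0000000000000000.  */
  printf ("%016llx\n", (unsigned long long) fcmge_zero_d (1.5));
  printf ("%016llx\n", (unsigned long long) fcmge_zero_d (-1.5));
  return 0;
}

The single-precision paths in the patch are identical except that they write a 32-bit mask via aarch64_set_vec_u32.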
diff --git a/sim/aarch64/ChangeLog b/sim/aarch64/ChangeLog
index 1d97291..0b3d21d 100644
--- a/sim/aarch64/ChangeLog
+++ b/sim/aarch64/ChangeLog
@@ -1,3 +1,9 @@
+2017-04-08 Jim Wilson <jim.wilson@linaro.org>
+
+ * simulator.c (do_scalar_FCMGE_zero, do_scalar_FCMLE_zero,
+ do_scalar_FCMGT_zero, do_scalar_FCMEQ_zero, do_scalar_FCMLT_zero): New.
+ (do_scalar_vec): Add calls to new functions.
+
2017-03-25 Jim Wilson <jim.wilson@linaro.org>
* simulator.c (set_flags_for_add32): Cast result to uint32_t in carry
diff --git a/sim/aarch64/simulator.c b/sim/aarch64/simulator.c
index f0668ad..c2e02b1 100644
--- a/sim/aarch64/simulator.c
+++ b/sim/aarch64/simulator.c
@@ -8926,6 +8926,146 @@ do_scalar_SSHL (sim_cpu *cpu)
aarch64_get_vec_s64 (cpu, rn, 0) >> - shift);
}
+/* Floating point scalar compare greater than or equal to 0. */
+static void
+do_scalar_FCMGE_zero (sim_cpu *cpu)
+{
+ /* instr [31,23] = 0111 1110 1
+ instr [22,22] = size
+ instr [21,16] = 1000 00
+ instr [15,10] = 1100 10
+ instr [9, 5] = Rn
+ instr [4, 0] = Rd. */
+
+ unsigned size = INSTR (22, 22);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+
+ NYI_assert (31, 23, 0x0FD);
+ NYI_assert (21, 16, 0x20);
+ NYI_assert (15, 10, 0x32);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ aarch64_set_vec_u64 (cpu, rd, 0,
+ aarch64_get_vec_double (cpu, rn, 0) >= 0.0 ? -1 : 0);
+ else
+ aarch64_set_vec_u32 (cpu, rd, 0,
+ aarch64_get_vec_float (cpu, rn, 0) >= 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare less than or equal to 0. */
+static void
+do_scalar_FCMLE_zero (sim_cpu *cpu)
+{
+ /* instr [31,23] = 0111 1110 1
+ instr [22,22] = size
+ instr [21,16] = 1000 00
+ instr [15,10] = 1101 10
+ instr [9, 5] = Rn
+ instr [4, 0] = Rd. */
+
+ unsigned size = INSTR (22, 22);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+
+ NYI_assert (31, 23, 0x0FD);
+ NYI_assert (21, 16, 0x20);
+ NYI_assert (15, 10, 0x36);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ aarch64_set_vec_u64 (cpu, rd, 0,
+ aarch64_get_vec_double (cpu, rn, 0) <= 0.0 ? -1 : 0);
+ else
+ aarch64_set_vec_u32 (cpu, rd, 0,
+ aarch64_get_vec_float (cpu, rn, 0) <= 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare greater than 0. */
+static void
+do_scalar_FCMGT_zero (sim_cpu *cpu)
+{
+ /* instr [31,23] = 0101 1110 1
+ instr [22,22] = size
+ instr [21,16] = 1000 00
+ instr [15,10] = 1100 10
+ instr [9, 5] = Rn
+ instr [4, 0] = Rd. */
+
+ unsigned size = INSTR (22, 22);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+
+ NYI_assert (31, 23, 0x0BD);
+ NYI_assert (21, 16, 0x20);
+ NYI_assert (15, 10, 0x32);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ aarch64_set_vec_u64 (cpu, rd, 0,
+ aarch64_get_vec_double (cpu, rn, 0) > 0.0 ? -1 : 0);
+ else
+ aarch64_set_vec_u32 (cpu, rd, 0,
+ aarch64_get_vec_float (cpu, rn, 0) > 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare equal to 0. */
+static void
+do_scalar_FCMEQ_zero (sim_cpu *cpu)
+{
+ /* instr [31,23] = 0101 1110 1
+ instr [22,22] = size
+ instr [21,16] = 1000 00
+ instr [15,10] = 1101 10
+ instr [9, 5] = Rn
+ instr [4, 0] = Rd. */
+
+ unsigned size = INSTR (22, 22);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+
+ NYI_assert (31, 23, 0x0BD);
+ NYI_assert (21, 16, 0x20);
+ NYI_assert (15, 10, 0x36);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ aarch64_set_vec_u64 (cpu, rd, 0,
+ aarch64_get_vec_double (cpu, rn, 0) == 0.0 ? -1 : 0);
+ else
+ aarch64_set_vec_u32 (cpu, rd, 0,
+ aarch64_get_vec_float (cpu, rn, 0) == 0.0 ? -1 : 0);
+}
+
+/* Floating point scalar compare less than 0. */
+static void
+do_scalar_FCMLT_zero (sim_cpu *cpu)
+{
+ /* instr [31,23] = 0101 1110 1
+ instr [22,22] = size
+ instr [21,16] = 1000 00
+ instr [15,10] = 1110 10
+ instr [9, 5] = Rn
+ instr [4, 0] = Rd. */
+
+ unsigned size = INSTR (22, 22);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+
+ NYI_assert (31, 23, 0x0BD);
+ NYI_assert (21, 16, 0x20);
+ NYI_assert (15, 10, 0x3A);
+
+ TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
+ if (size)
+ aarch64_set_vec_u64 (cpu, rd, 0,
+ aarch64_get_vec_double (cpu, rn, 0) < 0.0 ? -1 : 0);
+ else
+ aarch64_set_vec_u32 (cpu, rd, 0,
+ aarch64_get_vec_float (cpu, rn, 0) < 0.0 ? -1 : 0);
+}
+
static void
do_scalar_shift (sim_cpu *cpu)
{
@@ -9249,7 +9389,9 @@ do_scalar_vec (sim_cpu *cpu)
case 0x0D: do_scalar_CMGT (cpu); return;
case 0x11: do_scalar_USHL (cpu); return;
case 0x2E: do_scalar_NEG (cpu); return;
+ case 0x32: do_scalar_FCMGE_zero (cpu); return;
case 0x35: do_scalar_FABD (cpu); return;
+ case 0x36: do_scalar_FCMLE_zero (cpu); return;
case 0x39: do_scalar_FCM (cpu); return;
case 0x3B: do_scalar_FCM (cpu); return;
default:
@@ -9263,6 +9405,9 @@ do_scalar_vec (sim_cpu *cpu)
{
case 0x21: do_double_add (cpu); return;
case 0x11: do_scalar_SSHL (cpu); return;
+ case 0x32: do_scalar_FCMGT_zero (cpu); return;
+ case 0x36: do_scalar_FCMEQ_zero (cpu); return;
+ case 0x3A: do_scalar_FCMLT_zero (cpu); return;
default:
HALT_NYI;
}
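As the two hunks above show, the new cases are split across the two opcode groups in do_scalar_vec: FCMGE-zero and FCMLE-zero sit in the group whose handlers assert the 0x0FD prefix (bit 29 set), while FCMGT-zero, FCMEQ-zero and FCMLT-zero sit in the 0x0BD group (bit 29 clear), with bits [15,10] selecting the individual instruction. The C sketch below is a decode illustration only, not simulator code; the helper names and the sample encoding constant are assumptions reconstructed from the bit-field comments in the handlers above.

#include <stdint.h>
#include <stdio.h>

/* Extract instr[hi:lo]; mirrors what the simulator's INSTR macro provides.  */
static unsigned
bits (uint32_t insn, unsigned hi, unsigned lo)
{
  return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
}

/* Map an fcmXX-zero encoding to a name, following the bit-field comments
   in the handlers above.  Names are illustrative only.  */
static const char *
fcm_zero_name (uint32_t insn)
{
  unsigned u = bits (insn, 29, 29);   /* 1 -> 0x0FD group, 0 -> 0x0BD group.  */
  unsigned op = bits (insn, 15, 10);

  if (u)
    switch (op)
      {
      case 0x32: return "fcmge (zero)";
      case 0x36: return "fcmle (zero)";
      }
  else
    switch (op)
      {
      case 0x32: return "fcmgt (zero)";
      case 0x36: return "fcmeq (zero)";
      case 0x3A: return "fcmlt (zero)";
      }
  return "unhandled";
}

int
main (void)
{
  /* 0x7ee0c820 is assumed to encode "fcmge d0, d1, #0.0", built from the
     bit fields documented in do_scalar_FCMGE_zero above.  */
  printf ("%s\n", fcm_zero_name (0x7ee0c820));
  return 0;
}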