aboutsummaryrefslogtreecommitdiff
path: root/target-unicore32
diff options
context:
space:
mode:
authorGuan Xuetao <gxt@mprc.pku.edu.cn>2011-04-12 16:25:59 +0800
committerBlue Swirl <blauwirbel@gmail.com>2011-04-12 18:48:37 +0000
commit6e64da3cd6890181fbeaaae7c08cfb779f138d82 (patch)
tree04730ffb2826bdc31046d45e24f4a7c893d06e4b /target-unicore32
parent6d76d23e82d0ce8a7874b13f5951aa763e059908 (diff)
downloadqemu-6e64da3cd6890181fbeaaae7c08cfb779f138d82.zip
qemu-6e64da3cd6890181fbeaaae7c08cfb779f138d82.tar.gz
qemu-6e64da3cd6890181fbeaaae7c08cfb779f138d82.tar.bz2
unicore32: add target-unicore32 directory for unicore32-linux-user support
Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn> Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Diffstat (limited to 'target-unicore32')
-rw-r--r--target-unicore32/cpu.h182
-rw-r--r--target-unicore32/exec.h50
-rw-r--r--target-unicore32/helper.c487
-rw-r--r--target-unicore32/helper.h70
-rw-r--r--target-unicore32/op_helper.c248
-rw-r--r--target-unicore32/translate.c2105
6 files changed, 3142 insertions, 0 deletions
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
new file mode 100644
index 0000000..1e10049
--- /dev/null
+++ b/target-unicore32/cpu.h
@@ -0,0 +1,182 @@
+/*
+ * UniCore32 virtual CPU header
+ *
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __CPU_UC32_H__
+#define __CPU_UC32_H__
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define ELF_MACHINE EM_UNICORE32
+
+#define CPUState struct CPUState_UniCore32
+
+#include "cpu-defs.h"
+#include "softfloat.h"
+
+#define NB_MMU_MODES 2
+
+typedef struct CPUState_UniCore32 {
+ /* Regs for current mode. */
+ uint32_t regs[32];
+    /* Frequently accessed ASR bits are stored separately for efficiency.
+ This contains all the other bits. Use asr_{read,write} to access
+ the whole ASR. */
+ uint32_t uncached_asr;
+ uint32_t bsr;
+
+ /* Banked registers. */
+ uint32_t banked_bsr[6];
+ uint32_t banked_r29[6];
+ uint32_t banked_r30[6];
+
+ /* asr flag cache for faster execution */
+ uint32_t CF; /* 0 or 1 */
+ uint32_t VF; /* V is the bit 31. All other bits are undefined */
+ uint32_t NF; /* N is bit 31. All other bits are undefined. */
+ uint32_t ZF; /* Z set if zero. */
+
+ /* System control coprocessor (cp0) */
+ struct {
+ uint32_t c0_cpuid;
+ uint32_t c0_cachetype;
+ uint32_t c1_sys; /* System control register. */
+ uint32_t c2_base; /* MMU translation table base. */
+ uint32_t c3_faultstatus; /* Fault status registers. */
+ uint32_t c4_faultaddr; /* Fault address registers. */
+ uint32_t c5_cacheop; /* Cache operation registers. */
+ uint32_t c6_tlbop; /* TLB operation registers. */
+ } cp0;
+
+ /* UniCore-F64 coprocessor state. */
+ struct {
+ float64 regs[16];
+ uint32_t xregs[32];
+ float_status fp_status;
+ } ucf64;
+
+ CPU_COMMON
+
+ /* Internal CPU feature flags. */
+ uint32_t features;
+
+} CPUState_UniCore32;
+
+#define ASR_M (0x1f)
+#define ASR_MODE_USER (0x10)
+#define ASR_MODE_INTR (0x12)
+#define ASR_MODE_PRIV (0x13)
+#define ASR_MODE_TRAP (0x17)
+#define ASR_MODE_EXTN (0x1b)
+#define ASR_MODE_SUSR (0x1f)
+#define ASR_I (1 << 7)
+#define ASR_V (1 << 28)
+#define ASR_C (1 << 29)
+#define ASR_Z (1 << 30)
+#define ASR_N (1 << 31)
+#define ASR_NZCV (ASR_N | ASR_Z | ASR_C | ASR_V)
+#define ASR_RESERVED (~(ASR_M | ASR_I | ASR_NZCV))
+
+#define UC32_EXCP_PRIV (ASR_MODE_PRIV)
+#define UC32_EXCP_TRAP (ASR_MODE_TRAP)
+
+/* Return the current ASR value. */
+target_ulong cpu_asr_read(CPUState *env1);
+/* Set the ASR. Note that some bits of mask must be all-set or all-clear. */
+void cpu_asr_write(CPUState *env1, target_ulong val, target_ulong mask);
+
+/* UniCore-F64 system registers. */
+#define UC32_UCF64_FPSCR (31)
+#define UCF64_FPSCR_MASK (0x27ffffff)
+#define UCF64_FPSCR_RND_MASK (0x7)
+#define UCF64_FPSCR_RND(r) (((r) >> 0) & UCF64_FPSCR_RND_MASK)
+#define UCF64_FPSCR_TRAPEN_MASK (0x7f)
+#define UCF64_FPSCR_TRAPEN(r) (((r) >> 10) & UCF64_FPSCR_TRAPEN_MASK)
+#define UCF64_FPSCR_FLAG_MASK (0x3ff)
+#define UCF64_FPSCR_FLAG(r) (((r) >> 17) & UCF64_FPSCR_FLAG_MASK)
+#define UCF64_FPSCR_FLAG_ZERO (1 << 17)
+#define UCF64_FPSCR_FLAG_INFINITY (1 << 18)
+#define UCF64_FPSCR_FLAG_INVALID (1 << 19)
+#define UCF64_FPSCR_FLAG_UNDERFLOW (1 << 20)
+#define UCF64_FPSCR_FLAG_OVERFLOW (1 << 21)
+#define UCF64_FPSCR_FLAG_INEXACT (1 << 22)
+#define UCF64_FPSCR_FLAG_HUGEINT (1 << 23)
+#define UCF64_FPSCR_FLAG_DENORMAL (1 << 24)
+#define UCF64_FPSCR_FLAG_UNIMP (1 << 25)
+#define UCF64_FPSCR_FLAG_DIVZERO (1 << 26)
+
+#define UC32_HWCAP_CMOV 4 /* 1 << 2 */
+#define UC32_HWCAP_UCF64 8 /* 1 << 3 */
+
+#define UC32_CPUID(env) (env->cp0.c0_cpuid)
+#define UC32_CPUID_UCV2 0x40010863
+#define UC32_CPUID_ANY 0xffffffff
+
+#define cpu_init uc32_cpu_init
+#define cpu_exec uc32_cpu_exec
+#define cpu_signal_handler uc32_cpu_signal_handler
+#define cpu_handle_mmu_fault uc32_cpu_handle_mmu_fault
+
+CPUState *uc32_cpu_init(const char *cpu_model);
+int uc32_cpu_exec(CPUState *s);
+int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
+int uc32_cpu_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
+                              int mmu_idx, int is_softmmu);
+
+#define CPU_SAVE_VERSION 2
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index(CPUState *env)
+{
+ return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
+}
+
+static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->regs[29] = newsp;
+ }
+ env->regs[0] = 0;
+}
+
+static inline void cpu_set_tls(CPUState *env, target_ulong newtls)
+{
+ env->regs[16] = newtls;
+}
+
+#include "cpu-all.h"
+#include "exec-all.h"
+
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+ env->regs[31] = tb->pc;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->regs[31];
+ *cs_base = 0;
+ *flags = 0;
+ if ((env->uncached_asr & ASR_M) != ASR_MODE_USER) {
+ *flags |= (1 << 6);
+ }
+}
+
+void uc32_translate_init(void);
+void do_interrupt(CPUState *);
+void switch_mode(CPUState_UniCore32 *, int);
+
+#endif /* __CPU_UC32_H__ */
diff --git a/target-unicore32/exec.h b/target-unicore32/exec.h
new file mode 100644
index 0000000..4ab55f4
--- /dev/null
+++ b/target-unicore32/exec.h
@@ -0,0 +1,50 @@
+/*
+ * UniCore32 execution defines
+ *
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UC32_EXEC_H__
+#define __UC32_EXEC_H__
+
+#include "config.h"
+#include "dyngen-exec.h"
+
+register struct CPUState_UniCore32 *env asm(AREG0);
+
+#include "cpu.h"
+#include "exec-all.h"
+
+static inline void env_to_regs(void)
+{
+}
+
+static inline void regs_to_env(void)
+{
+}
+
+static inline int cpu_has_work(CPUState *env)
+{
+ return env->interrupt_request &
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+}
+
+static inline int cpu_halted(CPUState *env)
+{
+ if (!env->halted) {
+ return 0;
+ }
+ /* An interrupt wakes the CPU even if the I and R ASR bits are
+ set. We use EXITTB to silently wake CPU without causing an
+ actual interrupt. */
+ if (cpu_has_work(env)) {
+ env->halted = 0;
+ return 0;
+ }
+ return EXCP_HALTED;
+}
+
+#endif /* __UC32_EXEC_H__ */
diff --git a/target-unicore32/helper.c b/target-unicore32/helper.c
new file mode 100644
index 0000000..483aeae
--- /dev/null
+++ b/target-unicore32/helper.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "gdbstub.h"
+#include "helper.h"
+#include "qemu-common.h"
+#include "host-utils.h"
+
+static inline void set_feature(CPUState *env, int feature)
+{
+ env->features |= feature;
+}
+
+struct uc32_cpu_t {
+ uint32_t id;
+ const char *name;
+};
+
+static const struct uc32_cpu_t uc32_cpu_names[] = {
+ { UC32_CPUID_UCV2, "UniCore-II"},
+ { UC32_CPUID_ANY, "any"},
+ { 0, NULL}
+};
+
+/* return 0 if not found */
+static uint32_t uc32_cpu_find_by_name(const char *name)
+{
+ int i;
+ uint32_t id;
+
+ id = 0;
+ for (i = 0; uc32_cpu_names[i].name; i++) {
+ if (strcmp(name, uc32_cpu_names[i].name) == 0) {
+ id = uc32_cpu_names[i].id;
+ break;
+ }
+ }
+ return id;
+}
+
+CPUState *uc32_cpu_init(const char *cpu_model)
+{
+ CPUState *env;
+ uint32_t id;
+ static int inited = 1;
+
+ env = qemu_mallocz(sizeof(CPUState));
+ cpu_exec_init(env);
+
+ id = uc32_cpu_find_by_name(cpu_model);
+ switch (id) {
+ case UC32_CPUID_UCV2:
+ set_feature(env, UC32_HWCAP_CMOV);
+ set_feature(env, UC32_HWCAP_UCF64);
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = 0;
+ env->cp0.c0_cachetype = 0x1dd20d2;
+ env->cp0.c1_sys = 0x00090078;
+ break;
+ case UC32_CPUID_ANY: /* For userspace emulation. */
+ set_feature(env, UC32_HWCAP_CMOV);
+ set_feature(env, UC32_HWCAP_UCF64);
+ break;
+ default:
+ cpu_abort(env, "Bad CPU ID: %x\n", id);
+ }
+
+ env->cpu_model_str = cpu_model;
+ env->cp0.c0_cpuid = id;
+ env->uncached_asr = ASR_MODE_USER;
+ env->regs[31] = 0;
+
+ if (inited) {
+ inited = 0;
+ uc32_translate_init();
+ }
+
+ tlb_flush(env, 1);
+ qemu_init_vcpu(env);
+ return env;
+}
+
+uint32_t HELPER(clo)(uint32_t x)
+{
+ return clo32(x);
+}
+
+uint32_t HELPER(clz)(uint32_t x)
+{
+ return clz32(x);
+}
+
+void do_interrupt(CPUState *env)
+{
+ env->exception_index = -1;
+}
+
+int uc32_cpu_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmmu)
+{
+ env->exception_index = UC32_EXCP_TRAP;
+ env->cp0.c4_faultaddr = address;
+ return 1;
+}
+
+/* These should probably raise undefined insn exceptions. */
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
+{
+ int op1 = (insn >> 8) & 0xf;
+ cpu_abort(env, "cp%i insn %08x\n", op1, insn);
+ return;
+}
+
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
+{
+ int op1 = (insn >> 8) & 0xf;
+ cpu_abort(env, "cp%i insn %08x\n", op1, insn);
+ return 0;
+}
+
+void HELPER(set_cp0)(CPUState *env, uint32_t insn, uint32_t val)
+{
+ cpu_abort(env, "cp0 insn %08x\n", insn);
+}
+
+uint32_t HELPER(get_cp0)(CPUState *env, uint32_t insn)
+{
+ cpu_abort(env, "cp0 insn %08x\n", insn);
+ return 0;
+}
+
+void switch_mode(CPUState *env, int mode)
+{
+ if (mode != ASR_MODE_USER) {
+ cpu_abort(env, "Tried to switch out of user mode\n");
+ }
+}
+
+void HELPER(set_r29_banked)(CPUState *env, uint32_t mode, uint32_t val)
+{
+ cpu_abort(env, "banked r29 write\n");
+}
+
+uint32_t HELPER(get_r29_banked)(CPUState *env, uint32_t mode)
+{
+ cpu_abort(env, "banked r29 read\n");
+ return 0;
+}
+
+/* UniCore-F64 support.  We follow the convention used for F64 instructions:
+   Single precision routines have an "s" suffix, double precision a
+ "d" suffix. */
+
+/* Convert host exception flags to f64 form. */
+static inline int ucf64_exceptbits_from_host(int host_bits)
+{
+ int target_bits = 0;
+
+ if (host_bits & float_flag_invalid) {
+ target_bits |= UCF64_FPSCR_FLAG_INVALID;
+ }
+ if (host_bits & float_flag_divbyzero) {
+ target_bits |= UCF64_FPSCR_FLAG_DIVZERO;
+ }
+ if (host_bits & float_flag_overflow) {
+ target_bits |= UCF64_FPSCR_FLAG_OVERFLOW;
+ }
+ if (host_bits & float_flag_underflow) {
+ target_bits |= UCF64_FPSCR_FLAG_UNDERFLOW;
+ }
+ if (host_bits & float_flag_inexact) {
+ target_bits |= UCF64_FPSCR_FLAG_INEXACT;
+ }
+ return target_bits;
+}
+
+uint32_t HELPER(ucf64_get_fpscr)(CPUState *env)
+{
+ int i;
+ uint32_t fpscr;
+
+ fpscr = (env->ucf64.xregs[UC32_UCF64_FPSCR] & UCF64_FPSCR_MASK);
+ i = get_float_exception_flags(&env->ucf64.fp_status);
+ fpscr |= ucf64_exceptbits_from_host(i);
+ return fpscr;
+}
+
+/* Convert ucf64 exception flags to target form. */
+static inline int ucf64_exceptbits_to_host(int target_bits)
+{
+ int host_bits = 0;
+
+ if (target_bits & UCF64_FPSCR_FLAG_INVALID) {
+ host_bits |= float_flag_invalid;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_DIVZERO) {
+ host_bits |= float_flag_divbyzero;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_OVERFLOW) {
+ host_bits |= float_flag_overflow;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_UNDERFLOW) {
+ host_bits |= float_flag_underflow;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_INEXACT) {
+ host_bits |= float_flag_inexact;
+ }
+ return host_bits;
+}
+
+void HELPER(ucf64_set_fpscr)(CPUState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed;
+
+ changed = env->ucf64.xregs[UC32_UCF64_FPSCR];
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK);
+
+ changed ^= val;
+ if (changed & (UCF64_FPSCR_RND_MASK)) {
+ i = UCF64_FPSCR_RND(val);
+ switch (i) {
+ case 0:
+ i = float_round_nearest_even;
+ break;
+ case 1:
+ i = float_round_to_zero;
+ break;
+ case 2:
+ i = float_round_up;
+ break;
+ case 3:
+ i = float_round_down;
+ break;
+        default: /* rounding modes 100 and 101 are not implemented */
+ cpu_abort(env, "Unsupported UniCore-F64 round mode");
+ }
+ set_float_rounding_mode(i, &env->ucf64.fp_status);
+ }
+
+ i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val));
+ set_float_exception_flags(i, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_adds)(float32 a, float32 b, CPUState *env)
+{
+ return float32_add(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUState *env)
+{
+ return float64_add(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUState *env)
+{
+ return float32_sub(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_subd)(float64 a, float64 b, CPUState *env)
+{
+ return float64_sub(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUState *env)
+{
+ return float32_mul(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_muld)(float64 a, float64 b, CPUState *env)
+{
+ return float64_mul(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_divs)(float32 a, float32 b, CPUState *env)
+{
+ return float32_div(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUState *env)
+{
+ return float64_div(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_negs)(float32 a)
+{
+ return float32_chs(a);
+}
+
+float64 HELPER(ucf64_negd)(float64 a)
+{
+ return float64_chs(a);
+}
+
+float32 HELPER(ucf64_abss)(float32 a)
+{
+ return float32_abs(a);
+}
+
+float64 HELPER(ucf64_absd)(float64 a)
+{
+ return float64_abs(a);
+}
+
+/* XXX: check quiet/signaling case */
+void HELPER(ucf64_cmps)(float32 a, float32 b, uint32_t c, CPUState *env)
+{
+ int flag;
+ flag = float32_compare_quiet(a, b, &env->ucf64.fp_status);
+ env->CF = 0;
+ switch (c & 0x7) {
+ case 0: /* F */
+ break;
+ case 1: /* UN */
+ if (flag == 2) {
+ env->CF = 1;
+ }
+ break;
+ case 2: /* EQ */
+ if (flag == 0) {
+ env->CF = 1;
+ }
+ break;
+ case 3: /* UEQ */
+ if ((flag == 0) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 4: /* OLT */
+ if (flag == -1) {
+ env->CF = 1;
+ }
+ break;
+ case 5: /* ULT */
+ if ((flag == -1) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 6: /* OLE */
+ if ((flag == -1) || (flag == 0)) {
+ env->CF = 1;
+ }
+ break;
+ case 7: /* ULE */
+ if (flag != 1) {
+ env->CF = 1;
+ }
+ break;
+ }
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+ | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+void HELPER(ucf64_cmpd)(float64 a, float64 b, uint32_t c, CPUState *env)
+{
+ int flag;
+ flag = float64_compare_quiet(a, b, &env->ucf64.fp_status);
+ env->CF = 0;
+ switch (c & 0x7) {
+ case 0: /* F */
+ break;
+ case 1: /* UN */
+ if (flag == 2) {
+ env->CF = 1;
+ }
+ break;
+ case 2: /* EQ */
+ if (flag == 0) {
+ env->CF = 1;
+ }
+ break;
+ case 3: /* UEQ */
+ if ((flag == 0) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 4: /* OLT */
+ if (flag == -1) {
+ env->CF = 1;
+ }
+ break;
+ case 5: /* ULT */
+ if ((flag == -1) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 6: /* OLE */
+ if ((flag == -1) || (flag == 0)) {
+ env->CF = 1;
+ }
+ break;
+ case 7: /* ULE */
+ if (flag != 1) {
+ env->CF = 1;
+ }
+ break;
+ }
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+ | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+/* Helper routines to perform bitwise copies between float and int. */
+static inline float32 ucf64_itos(uint32_t i)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.i = i;
+ return v.s;
+}
+
+static inline uint32_t ucf64_stoi(float32 s)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.s = s;
+ return v.i;
+}
+
+static inline float64 ucf64_itod(uint64_t i)
+{
+ union {
+ uint64_t i;
+ float64 d;
+ } v;
+
+ v.i = i;
+ return v.d;
+}
+
+static inline uint64_t ucf64_dtoi(float64 d)
+{
+ union {
+ uint64_t i;
+ float64 d;
+ } v;
+
+ v.d = d;
+ return v.i;
+}
+
+/* Integer to float conversion. */
+float32 HELPER(ucf64_si2sf)(float32 x, CPUState *env)
+{
+ return int32_to_float32(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_si2df)(float32 x, CPUState *env)
+{
+ return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+/* Float to integer conversion. */
+float32 HELPER(ucf64_sf2si)(float32 x, CPUState *env)
+{
+ return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status));
+}
+
+float32 HELPER(ucf64_df2si)(float64 x, CPUState *env)
+{
+ return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status));
+}
+
+/* floating point conversion */
+float64 HELPER(ucf64_sf2df)(float32 x, CPUState *env)
+{
+ return float32_to_float64(x, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_df2sf)(float64 x, CPUState *env)
+{
+ return float64_to_float32(x, &env->ucf64.fp_status);
+}
diff --git a/target-unicore32/helper.h b/target-unicore32/helper.h
new file mode 100644
index 0000000..615de2a
--- /dev/null
+++ b/target-unicore32/helper.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "def-helper.h"
+
+DEF_HELPER_1(clz, i32, i32)
+DEF_HELPER_1(clo, i32, i32)
+
+DEF_HELPER_1(exception, void, i32)
+
+DEF_HELPER_2(asr_write, void, i32, i32)
+DEF_HELPER_0(asr_read, i32)
+
+DEF_HELPER_3(set_cp0, void, env, i32, i32)
+DEF_HELPER_2(get_cp0, i32, env, i32)
+
+DEF_HELPER_3(set_cp, void, env, i32, i32)
+DEF_HELPER_2(get_cp, i32, env, i32)
+
+DEF_HELPER_1(get_user_reg, i32, i32)
+DEF_HELPER_2(set_user_reg, void, i32, i32)
+
+DEF_HELPER_2(add_cc, i32, i32, i32)
+DEF_HELPER_2(adc_cc, i32, i32, i32)
+DEF_HELPER_2(sub_cc, i32, i32, i32)
+DEF_HELPER_2(sbc_cc, i32, i32, i32)
+
+DEF_HELPER_2(shl, i32, i32, i32)
+DEF_HELPER_2(shr, i32, i32, i32)
+DEF_HELPER_2(sar, i32, i32, i32)
+DEF_HELPER_2(shl_cc, i32, i32, i32)
+DEF_HELPER_2(shr_cc, i32, i32, i32)
+DEF_HELPER_2(sar_cc, i32, i32, i32)
+DEF_HELPER_2(ror_cc, i32, i32, i32)
+
+DEF_HELPER_2(get_r29_banked, i32, env, i32)
+DEF_HELPER_3(set_r29_banked, void, env, i32, i32)
+
+DEF_HELPER_1(ucf64_get_fpscr, i32, env)
+DEF_HELPER_2(ucf64_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(ucf64_adds, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_addd, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_subs, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_subd, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_muls, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_muld, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_divs, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_divd, f64, f64, f64, env)
+DEF_HELPER_1(ucf64_negs, f32, f32)
+DEF_HELPER_1(ucf64_negd, f64, f64)
+DEF_HELPER_1(ucf64_abss, f32, f32)
+DEF_HELPER_1(ucf64_absd, f64, f64)
+DEF_HELPER_4(ucf64_cmps, void, f32, f32, i32, env)
+DEF_HELPER_4(ucf64_cmpd, void, f64, f64, i32, env)
+
+DEF_HELPER_2(ucf64_sf2df, f64, f32, env)
+DEF_HELPER_2(ucf64_df2sf, f32, f64, env)
+
+DEF_HELPER_2(ucf64_si2sf, f32, f32, env)
+DEF_HELPER_2(ucf64_si2df, f64, f32, env)
+
+DEF_HELPER_2(ucf64_sf2si, f32, f32, env)
+DEF_HELPER_2(ucf64_df2si, f32, f64, env)
+
+#include "def-helper.h"
diff --git a/target-unicore32/op_helper.c b/target-unicore32/op_helper.c
new file mode 100644
index 0000000..31e4b11
--- /dev/null
+++ b/target-unicore32/op_helper.c
@@ -0,0 +1,248 @@
+/*
+ * UniCore32 helper routines
+ *
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "exec.h"
+#include "helper.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+void HELPER(exception)(uint32_t excp)
+{
+ env->exception_index = excp;
+ cpu_loop_exit();
+}
+
+static target_ulong asr_read(void)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return env->uncached_asr | (env->NF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3);
+}
+
+target_ulong cpu_asr_read(CPUState *env1)
+{
+ CPUState *saved_env;
+ target_ulong ret;
+
+ saved_env = env;
+ env = env1;
+ ret = asr_read();
+ env = saved_env;
+ return ret;
+}
+
+target_ulong HELPER(asr_read)(void)
+{
+ return asr_read();
+}
+
+static void asr_write(target_ulong val, target_ulong mask)
+{
+ if (mask & ASR_NZCV) {
+ env->ZF = (~val) & ASR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+
+ if ((env->uncached_asr ^ val) & mask & ASR_M) {
+ switch_mode(env, val & ASR_M);
+ }
+ mask &= ~ASR_NZCV;
+ env->uncached_asr = (env->uncached_asr & ~mask) | (val & mask);
+}
+
+void cpu_asr_write(CPUState *env1, target_ulong val, target_ulong mask)
+{
+ CPUState *saved_env;
+
+ saved_env = env;
+ env = env1;
+ asr_write(val, mask);
+ env = saved_env;
+}
+
+void HELPER(asr_write)(target_ulong val, target_ulong mask)
+{
+ asr_write(val, mask);
+}
+
+/* Access to user mode registers from privileged modes. */
+uint32_t HELPER(get_user_reg)(uint32_t regno)
+{
+ uint32_t val;
+
+ if (regno == 29) {
+ val = env->banked_r29[0];
+ } else if (regno == 30) {
+ val = env->banked_r30[0];
+ } else {
+ val = env->regs[regno];
+ }
+ return val;
+}
+
+void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
+{
+ if (regno == 29) {
+ env->banked_r29[0] = val;
+ } else if (regno == 30) {
+ env->banked_r30[0] = val;
+ } else {
+ env->regs[regno] = val;
+ }
+}
+
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+ The only way to do that in TCG is a conditional branch, which clobbers
+ all our temporaries. For now implement these as helper functions. */
+
+uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ result = a + b;
+ env->NF = env->ZF = result;
+ env->CF = result < a;
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a + b;
+ env->CF = result < a;
+ } else {
+ result = a + b + 1;
+ env->CF = result <= a;
+ }
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ result = a - b;
+ env->NF = env->ZF = result;
+ env->CF = a >= b;
+ env->VF = (a ^ b) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a - b - 1;
+ env->CF = a > b;
+ } else {
+ result = a - b;
+ env->CF = a >= b;
+ }
+ env->VF = (a ^ b) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+/* Similarly for variable shift instructions. */
+
+uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ return 0;
+ }
+ return x << shift;
+}
+
+uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ return 0;
+ }
+ return (uint32_t)x >> shift;
+}
+
+uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ shift = 31;
+ }
+ return (int32_t)x >> shift;
+}
+
+uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = x & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (32 - shift)) & 1;
+ return x << shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = (x >> 31) & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ env->CF = (x >> 31) & 1;
+ return (int32_t)x >> 31;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return (int32_t)x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+{
+ int shift1, shift;
+ shift1 = i & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+ if (shift1 != 0) {
+ env->CF = (x >> 31) & 1;
+ }
+ return x;
+ } else {
+ env->CF = (x >> (shift - 1)) & 1;
+ return ((uint32_t)x >> shift) | (x << (32 - shift));
+ }
+}
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
new file mode 100644
index 0000000..a6ba991
--- /dev/null
+++ b/target-unicore32/translate.c
@@ -0,0 +1,2105 @@
+/*
+ * UniCore32 translation
+ *
+ * Copyright (C) 2010-2011 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+#include "tcg-op.h"
+#include "qemu-log.h"
+
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
+/* internal defines */
+
+/* Per-instruction translation state threaded through the decoder. */
+typedef struct DisasContext {
+ target_ulong pc;
+ int is_jmp;
+ /* Nonzero if this instruction has been conditionally skipped. */
+ int condjmp;
+ /* The label that will be jumped to when the instruction is skipped. */
+ int condlabel;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+} DisasContext;
+
+/* Only linux-user emulation is supported here, so translation always runs
+ in user mode. */
+#define IS_USER(s) 1
+
+/* These instructions trap after executing, so defer them until after the
+ conditional executions state has been updated. */
+#define DISAS_SYSCALL 5
+
+/* TCG global variables mapping the CPU state: env pointer and the 32
+ general-purpose registers (r31 is the PC). */
+static TCGv_ptr cpu_env;
+static TCGv_i32 cpu_R[32];
+
+/* FIXME: These should be removed. */
+static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
+
+#include "gen-icount.h"
+
+/* Names used when registering the register globals with TCG. */
+static const char *regnames[] = {
+ "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
+ "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
+
+/* initialize TCG globals. */
+/* Called once at startup: registers env and all 32 CPU registers as TCG
+ globals, then pulls in the generated helper registration code. */
+void uc32_translate_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+ for (i = 0; i < 32; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, regs[i]), regnames[i]);
+ }
+
+#define GEN_HELPER 2
+#include "helper.h"
+}
+
+/* Count of live temporaries; used to balance new_tmp/dead_tmp calls. */
+static int num_temps;
+
+/* Allocate a temporary variable. */
+static TCGv_i32 new_tmp(void)
+{
+ num_temps++;
+ return tcg_temp_new_i32();
+}
+
+/* Release a temporary variable. */
+static void dead_tmp(TCGv tmp)
+{
+ tcg_temp_free(tmp);
+ num_temps--;
+}
+
+/* Load a 32-bit CPUState field at 'offset' into a fresh temporary. */
+static inline TCGv load_cpu_offset(int offset)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
+
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
+
+/* Store 'var' to a 32-bit CPUState field at 'offset'; frees 'var'. */
+static inline void store_cpu_offset(TCGv var, int offset)
+{
+ tcg_gen_st_i32(var, cpu_env, offset);
+ dead_tmp(var);
+}
+
+#define store_cpu_field(var, name) \
+ store_cpu_offset(var, offsetof(CPUState, name))
+
+/* Set a variable to the value of a CPU register. */
+static void load_reg_var(DisasContext *s, TCGv var, int reg)
+{
+ if (reg == 31) {
+ uint32_t addr;
+ /* normally, since we updated PC; reading r31 yields the address of
+ the next instruction */
+ addr = (long)s->pc;
+ tcg_gen_movi_i32(var, addr);
+ } else {
+ tcg_gen_mov_i32(var, cpu_R[reg]);
+ }
+}
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv load_reg(DisasContext *s, int reg)
+{
+ TCGv tmp = new_tmp();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+ marked as dead. Writing r31 (the PC) forces word alignment and ends
+ the translation block. */
+static void store_reg(DisasContext *s, int reg, TCGv var)
+{
+ if (reg == 31) {
+ tcg_gen_andi_i32(var, var, ~3);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_gen_mov_i32(cpu_R[reg], var);
+ dead_tmp(var);
+}
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+/* Instruction field extractors: register numbers, shift amounts/ops,
+ opcode, immediates, condition codes and coprocessor fields of 'insn'. */
+#define UCOP_REG_M (((insn) >> 0) & 0x1f)
+#define UCOP_REG_N (((insn) >> 19) & 0x1f)
+#define UCOP_REG_D (((insn) >> 14) & 0x1f)
+#define UCOP_REG_S (((insn) >> 9) & 0x1f)
+#define UCOP_REG_LO (((insn) >> 14) & 0x1f)
+#define UCOP_REG_HI (((insn) >> 9) & 0x1f)
+#define UCOP_SH_OP (((insn) >> 6) & 0x03)
+#define UCOP_SH_IM (((insn) >> 9) & 0x1f)
+#define UCOP_OPCODES (((insn) >> 25) & 0x0f)
+#define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
+#define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
+#define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
+#define UCOP_COND (((insn) >> 25) & 0x0f)
+#define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
+#define UCOP_CPNUM (((insn) >> 10) & 0x0f)
+#define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
+#define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
+#define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
+
+/* Single-bit tests on 'insn' and the common P/U/B/W/L/S flag bits. */
+#define UCOP_SET(i) ((insn) & (1 << (i)))
+#define UCOP_SET_P UCOP_SET(28)
+#define UCOP_SET_U UCOP_SET(27)
+#define UCOP_SET_B UCOP_SET(26)
+#define UCOP_SET_W UCOP_SET(25)
+#define UCOP_SET_L UCOP_SET(24)
+#define UCOP_SET_S UCOP_SET(24)
+
+/* Abort translation on an undecodable instruction encoding. */
+#define ILLEGAL cpu_abort(env, \
+ "Illegal UniCore32 instruction %x at line %d!", \
+ insn, __LINE__)
+
+/* Write the bits of 'var' selected by 'mask' into the ASR via helper. */
+static inline void gen_set_asr(TCGv var, uint32_t mask)
+{
+ TCGv tmp_mask = tcg_const_i32(mask);
+ gen_helper_asr_write(var, tmp_mask);
+ tcg_temp_free_i32(tmp_mask);
+}
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
+
+/* Emit code that raises exception number 'excp' at runtime. */
+static void gen_exception(int excp)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, excp);
+ gen_helper_exception(tmp);
+ dead_tmp(tmp);
+}
+
+/* FIXME: Most targets have native widening multiplication.
+ It would be good to use that instead of a full wide multiply. */
+/* 32x32->64 multiply. Marks inputs as dead. */
+static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
+{
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp1, a);
+ dead_tmp(a);
+ tcg_gen_extu_i32_i64(tmp2, b);
+ dead_tmp(b);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ return tmp1;
+}
+
+/* Signed 32x32->64 multiply. Marks inputs as dead. */
+static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
+{
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(tmp1, a);
+ dead_tmp(a);
+ tcg_gen_ext_i32_i64(tmp2, b);
+ dead_tmp(b);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ tcg_temp_free_i64(tmp2);
+ return tmp1;
+}
+
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
+
+/* Set CF to the top bit of var. */
+static void gen_set_CF_bit31(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 31);
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Set N and Z flags from var. */
+/* NF/ZF hold the raw result: N is its sign bit, Z means "result == 0". */
+static inline void gen_logic_CC(TCGv var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
+}
+
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_add_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ dead_tmp(tmp);
+}
+
+/* dest = T0 - T1 + CF - 1. */
+static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_sub_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ tcg_gen_subi_i32(dest, dest, 1);
+ dead_tmp(tmp);
+}
+
+/* Copy bit 'shift' of 'var' into CF (the bit shifted out by an
+ immediate-shift operation). 'var' itself is not modified. */
+static void shifter_out_im(TCGv var, int shift)
+{
+ TCGv tmp = new_tmp();
+ if (shift == 0) {
+ tcg_gen_andi_i32(tmp, var, 1);
+ } else {
+ tcg_gen_shri_i32(tmp, var, shift);
+ if (shift != 31) {
+ tcg_gen_andi_i32(tmp, tmp, 1);
+ }
+ }
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Shift by immediate. Includes special handling for shift == 0, which
+ encodes LSR #32, ASR #32 and RRX respectively. If 'flags' is set, CF
+ is updated with the last bit shifted out. */
+static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
+ int flags)
+{
+    switch (shiftop) {
+    case 0: /* LSL */
+        if (shift != 0) {
+            if (flags) {
+                shifter_out_im(var, 32 - shift);
+            }
+            tcg_gen_shli_i32(var, var, shift);
+        }
+        break;
+    case 1: /* LSR */
+        if (shift == 0) {
+            /* LSR #0 encodes LSR #32: result is 0, CF = old bit 31. */
+            if (flags) {
+                tcg_gen_shri_i32(var, var, 31);
+                gen_set_CF(var);
+            }
+            tcg_gen_movi_i32(var, 0);
+        } else {
+            if (flags) {
+                shifter_out_im(var, shift - 1);
+            }
+            tcg_gen_shri_i32(var, var, shift);
+        }
+        break;
+    case 2: /* ASR */
+        if (shift == 0) {
+            shift = 32; /* ASR #0 encodes ASR #32 */
+        }
+        if (flags) {
+            shifter_out_im(var, shift - 1);
+        }
+        if (shift == 32) {
+            /* A shift by the full width is not valid TCG; an arithmetic
+             shift by 31 produces the same all-zeros/all-ones result. */
+            shift = 31;
+        }
+        tcg_gen_sari_i32(var, var, shift);
+        break;
+    case 3: /* ROR/RRX */
+        if (shift != 0) {
+            if (flags) {
+                shifter_out_im(var, shift - 1);
+            }
+            tcg_gen_rotri_i32(var, var, shift);
+        } else {
+            /* ROR #0 encodes RRX: rotate right by one through carry. */
+            TCGv tmp = load_cpu_field(CF);
+            if (flags) {
+                shifter_out_im(var, 0);
+            }
+            tcg_gen_shri_i32(var, var, 1);
+            tcg_gen_shli_i32(tmp, tmp, 31);
+            tcg_gen_or_i32(var, var, tmp);
+            dead_tmp(tmp);
+        }
+        break;
+    }
+}
+
+/* Shift 'var' by a register-supplied amount. When 'flags' is set the CF
+ updating helper variants are used; otherwise the plain helpers (or an
+ inline rotate). The 'shift' temporary is freed. */
+static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
+ TCGv shift, int flags)
+{
+ if (flags) {
+ switch (shiftop) {
+ case 0:
+ gen_helper_shl_cc(var, var, shift);
+ break;
+ case 1:
+ gen_helper_shr_cc(var, var, shift);
+ break;
+ case 2:
+ gen_helper_sar_cc(var, var, shift);
+ break;
+ case 3:
+ gen_helper_ror_cc(var, var, shift);
+ break;
+ }
+ } else {
+ switch (shiftop) {
+ case 0:
+ gen_helper_shl(var, var, shift);
+ break;
+ case 1:
+ gen_helper_shr(var, var, shift);
+ break;
+ case 2:
+ gen_helper_sar(var, var, shift);
+ break;
+ case 3:
+ /* rotate amount is taken modulo 32, so no helper is needed */
+ tcg_gen_andi_i32(shift, shift, 0x1f);
+ tcg_gen_rotr_i32(var, var, shift);
+ break;
+ }
+ }
+ dead_tmp(shift);
+}
+
+/* Emit a branch to 'label' taken when condition code 'cc' holds, reading
+ the lazily-stored flag fields (ZF==0 means Z set; NF/VF sign bits hold
+ N/V; CF is 0 or 1). Compound conditions use an extra 'inv' label. */
+static void gen_test_cc(int cc, int label)
+{
+ TCGv tmp;
+ TCGv tmp2;
+ int inv;
+
+ switch (cc) {
+ case 0: /* eq: Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 1: /* ne: !Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 2: /* cs: C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 3: /* cc: !C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 4: /* mi: N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 5: /* pl: !N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 6: /* vs: V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 7: /* vc: !V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 8: /* hi: C && !Z */
+ inv = gen_new_label();
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 9: /* ls: !C || Z */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 10: /* ge: N == V -> N ^ V == 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 11: /* lt: N != V -> N ^ V != 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 12: /* gt: !Z && N == V */
+ inv = gen_new_label();
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 13: /* le: Z || N != V */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ default:
+ fprintf(stderr, "Bad condition code 0x%x\n", cc);
+ abort();
+ }
+ dead_tmp(tmp);
+}
+
+/* For each data-processing opcode: 1 if its S-bit sets only N/Z (logic
+ ops, handled inline), 0 if it needs the full arithmetic-flag helpers. */
+static const uint8_t table_logic_cc[16] = {
+ 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
+ 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
+ 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
+ 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
+};
+
+/* Set PC state from an immediate address. */
+static inline void gen_bx_im(DisasContext *s, uint32_t addr)
+{
+ s->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_i32(cpu_R[31], addr & ~3);
+}
+
+/* Set PC state from var. var is marked as dead. */
+static inline void gen_bx(DisasContext *s, TCGv var)
+{
+ s->is_jmp = DISAS_UPDATE;
+ tcg_gen_andi_i32(cpu_R[31], var, ~3);
+ dead_tmp(var);
+}
+
+/* Store to a register; kept as a separate name to mirror the ARM
+ translator's store_reg_bx hook (no extra behavior here). */
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
+{
+ store_reg(s, reg, var);
+}
+
+/* Guest memory access helpers. Each load returns a fresh temporary with
+ the loaded (sign-/zero-extended) value; each store consumes and frees
+ the value temporary. 'index' is the MMU index for the access. */
+static inline TCGv gen_ld8s(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld8u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8u(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld16s(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld16u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16u(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld32(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv_i64 gen_ld64(TCGv addr, int index)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(tmp, addr, index);
+ return tmp;
+}
+
+static inline void gen_st8(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st8(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_st16(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st16(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_st32(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st32(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st64(val, addr, index);
+ tcg_temp_free_i64(val);
+}
+
+/* Set the PC register to an immediate value. */
+static inline void gen_set_pc_im(uint32_t val)
+{
+ tcg_gen_movi_i32(cpu_R[31], val);
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+static inline void gen_lookup_tb(DisasContext *s)
+{
+ tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+/* Apply the addressing-mode offset of a load/store instruction to 'var':
+ either a 14-bit immediate or a shifted register, added or subtracted
+ according to the U bit. */
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
+ TCGv var)
+{
+ int val;
+ TCGv offset;
+
+ if (UCOP_SET(29)) {
+ /* immediate */
+ val = UCOP_IMM14;
+ if (!UCOP_SET_U) {
+ val = -val;
+ }
+ if (val != 0) {
+ tcg_gen_addi_i32(var, var, val);
+ }
+ } else {
+ /* shift/register */
+ offset = load_reg(s, UCOP_REG_M);
+ gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
+ if (!UCOP_SET_U) {
+ tcg_gen_sub_i32(var, var, offset);
+ } else {
+ tcg_gen_add_i32(var, var, offset);
+ }
+ dead_tmp(offset);
+ }
+}
+
+/* Apply the halfword-form addressing offset to 'var': a split immediate
+ (low 5 bits plus bits 9..13) or a plain register, signed by the U bit. */
+static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
+ TCGv var)
+{
+ int val;
+ TCGv offset;
+
+ if (UCOP_SET(26)) {
+ /* immediate */
+ val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
+ if (!UCOP_SET_U) {
+ val = -val;
+ }
+ if (val != 0) {
+ tcg_gen_addi_i32(var, var, val);
+ }
+ } else {
+ /* register */
+ offset = load_reg(s, UCOP_REG_M);
+ if (!UCOP_SET_U) {
+ tcg_gen_sub_i32(var, var, offset);
+ } else {
+ tcg_gen_add_i32(var, var, offset);
+ }
+ dead_tmp(offset);
+ }
+}
+
+/* Byte offset within CPUState of UniCore-F64 single register 'reg': odd
+ registers map to the upper half, even to the lower half, of the 64-bit
+ ucf64.regs[reg/2] slot. */
+static inline long ucf64_reg_offset(int reg)
+{
+ if (reg & 1) {
+ return offsetof(CPUState, ucf64.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.upper);
+ } else {
+ return offsetof(CPUState, ucf64.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.lower);
+ }
+}
+
+#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
+#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
+
+/* UniCore-F64 single load/store I_offset */
+/* Pre- or post-indexed single-word FP load/store with a scaled 10-bit
+ immediate offset; write-back to the base register when W is set. */
+static void do_ucf64_ldst_i(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ int offset;
+ TCGv tmp;
+ TCGv addr;
+
+ addr = load_reg(s, UCOP_REG_N);
+ if (!UCOP_SET_P && !UCOP_SET_W) {
+ ILLEGAL;
+ }
+
+ if (UCOP_SET_P) {
+ offset = UCOP_IMM10 << 2;
+ if (!UCOP_SET_U) {
+ offset = -offset;
+ }
+ if (offset != 0) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ }
+
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, IS_USER(s));
+ ucf64_gen_st32(tmp, UCOP_REG_D);
+ } else { /* store */
+ tmp = ucf64_gen_ld32(UCOP_REG_D);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+
+ if (!UCOP_SET_P) {
+ offset = UCOP_IMM10 << 2;
+ if (!UCOP_SET_U) {
+ offset = -offset;
+ }
+ if (offset != 0) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ }
+ if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+}
+
+/* UniCore-F64 load/store multiple words */
+/* Transfers a bank of up to 8 consecutive FP single registers (selected
+ by the low 8 bits, starting at a bank chosen by insn[9:8]) to/from a
+ contiguous memory block, with optional base write-back. */
+static void do_ucf64_ldst_m(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int i;
+ int j, n, freg;
+ TCGv tmp;
+ TCGv addr;
+
+ if (UCOP_REG_D != 0) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_N == 31) {
+ ILLEGAL;
+ }
+ /* an empty register list (low 8 bits all clear) is invalid */
+ if ((insn << 24) == 0) {
+ ILLEGAL;
+ }
+
+ addr = load_reg(s, UCOP_REG_N);
+
+ /* n = number of registers to transfer */
+ n = 0;
+ for (i = 0; i < 8; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) { /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } /* unnecessary to do anything when post increment */
+ } else {
+ if (UCOP_SET_P) { /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ } else { /* post decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ }
+ }
+
+ freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
+
+ for (i = 0, j = 0; i < 8; i++, freg++) {
+ if (!UCOP_SET(i)) {
+ continue;
+ }
+
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, IS_USER(s));
+ ucf64_gen_st32(tmp, freg);
+ } else { /* store */
+ tmp = ucf64_gen_ld32(freg);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+
+ j++;
+ /* unnecessary to add after the last transfer */
+ if (j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ if (UCOP_SET_W) { /* write back */
+ /* undo/extend the final increment so the base ends up past (or
+ before) the whole transferred block */
+ if (UCOP_SET_U) {
+ if (!UCOP_SET_P) { /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } /* unnecessary to do anything when pre increment */
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ }
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+}
+
+/* UniCore-F64 mrc/mcr */
+/* Transfers between general registers and the FP unit: FPSCR access
+ (CFF/CTF), register moves (MFF/MTF), and compare-to-flags (MFFC). */
+static void do_ucf64_trans(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv tmp;
+
+ if ((insn & 0xfe0003ff) == 0xe2000000) {
+ /* control register */
+ if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
+ ILLEGAL;
+ }
+ if (UCOP_SET(24)) {
+ /* CFF */
+ tmp = new_tmp();
+ gen_helper_ucf64_get_fpscr(tmp, cpu_env);
+ store_reg(s, UCOP_REG_D, tmp);
+ } else {
+ /* CTF */
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_helper_ucf64_set_fpscr(cpu_env, tmp);
+ dead_tmp(tmp);
+ gen_lookup_tb(s);
+ }
+ return;
+ }
+ if ((insn & 0xfe0003ff) == 0xe0000000) {
+ /* general register */
+ if (UCOP_REG_D == 31) {
+ ILLEGAL;
+ }
+ if (UCOP_SET(24)) { /* MFF */
+ tmp = ucf64_gen_ld32(UCOP_REG_N);
+ store_reg(s, UCOP_REG_D, tmp);
+ } else { /* MTF */
+ tmp = load_reg(s, UCOP_REG_D);
+ ucf64_gen_st32(tmp, UCOP_REG_N);
+ }
+ return;
+ }
+ if ((insn & 0xfb000000) == 0xe9000000) {
+ /* MFFC */
+ if (UCOP_REG_D != 31) {
+ ILLEGAL;
+ }
+ if (UCOP_UCF64_COND & 0x8) {
+ ILLEGAL;
+ }
+
+ /* the condition number is passed to the compare helper */
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
+ if (UCOP_SET(26)) {
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
+ } else {
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
+ }
+ dead_tmp(tmp);
+ return;
+ }
+ ILLEGAL;
+}
+
+/* UniCore-F64 convert instructions */
+/* Conversions between single (s), double (d) and 32-bit integer (w)
+ formats; the source format is UCOP_UCF64_FMT, the target is selected
+ by the function field (cvt.s / cvt.d / cvt.w). */
+static void do_ucf64_fcvt(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ if (UCOP_UCF64_FMT == 3) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_N != 0) {
+ ILLEGAL;
+ }
+ switch (UCOP_UCF64_FUNC) {
+ case 0: /* cvt.s */
+ switch (UCOP_UCF64_FMT) {
+ case 1 /* d */:
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 2 /* w */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* s */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ case 1: /* cvt.d */
+ switch (UCOP_UCF64_FMT) {
+ case 0 /* s */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
+ tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 2 /* w */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
+ tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* d */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ case 4: /* cvt.w */
+ switch (UCOP_UCF64_FMT) {
+ case 0 /* s */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 1 /* d */:
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* w */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ default:
+ ILLEGAL;
+ }
+}
+
+/* UniCore-F64 compare instructions */
+/* Not implemented yet: after operand checks this deliberately raises
+ ILLEGAL, so the compare code below it is currently unreachable. */
+static void do_ucf64_fcmp(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ if (UCOP_SET(25)) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_D != 0) {
+ ILLEGAL;
+ }
+
+ ILLEGAL; /* TODO */
+ if (UCOP_SET(24)) {
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
+ } else {
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
+ }
+}
+
+/* mov has no helper: the load/store of cpu_F0s/F0d already copies. */
+#define gen_helper_ucf64_movs(x, y) do { } while (0)
+#define gen_helper_ucf64_movd(x, y) do { } while (0)
+
+/* Emit a one-operand FP op (REG_D = op REG_M) for the current format;
+ integer format (w) is illegal for these operations. */
+#define UCF64_OP1(name) do { \
+ if (UCOP_REG_N != 0) { \
+ ILLEGAL; \
+ } \
+ switch (UCOP_UCF64_FMT) { \
+ case 0 /* s */: \
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
+ tcg_gen_st_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 1 /* d */: \
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
+ tcg_gen_st_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 2 /* w */: \
+ ILLEGAL; \
+ break; \
+ } \
+ } while (0)
+
+/* Emit a two-operand FP op (REG_D = REG_N op REG_M) for the current
+ format; integer format (w) is illegal for these operations. */
+#define UCF64_OP2(name) do { \
+ switch (UCOP_UCF64_FMT) { \
+ case 0 /* s */: \
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_N)); \
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##s(cpu_F0s, \
+ cpu_F0s, cpu_F1s, cpu_env); \
+ tcg_gen_st_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 1 /* d */: \
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_N)); \
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##d(cpu_F0d, \
+ cpu_F0d, cpu_F1d, cpu_env); \
+ tcg_gen_st_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 2 /* w */: \
+ ILLEGAL; \
+ break; \
+ } \
+ } while (0)
+
+/* UniCore-F64 data processing */
+/* Dispatches the FP arithmetic group on the function field; the UCF64_OP1
+ and UCF64_OP2 macros expand to the per-format TCG sequences. */
+static void do_ucf64_datap(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ if (UCOP_UCF64_FMT == 3) {
+ ILLEGAL;
+ }
+ switch (UCOP_UCF64_FUNC) {
+ case 0: /* add */
+ UCF64_OP2(add);
+ break;
+ case 1: /* sub */
+ UCF64_OP2(sub);
+ break;
+ case 2: /* mul */
+ UCF64_OP2(mul);
+ break;
+ case 4: /* div */
+ UCF64_OP2(div);
+ break;
+ case 5: /* abs */
+ UCF64_OP1(abs);
+ break;
+ case 6: /* mov */
+ UCF64_OP1(mov);
+ break;
+ case 7: /* neg */
+ UCF64_OP1(neg);
+ break;
+ default:
+ ILLEGAL;
+ }
+}
+
+/* Disassemble an F64 instruction */
+/* Top-level UniCore-F64 decoder: bit 29 separates load/store (single vs
+ multiple via bit 26) from the data-processing/transfer groups. */
+static void disas_ucf64_insn(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ if (!UCOP_SET(29)) {
+ if (UCOP_SET(26)) {
+ do_ucf64_ldst_m(env, s, insn);
+ } else {
+ do_ucf64_ldst_i(env, s, insn);
+ }
+ } else {
+ if (UCOP_SET(5)) {
+ switch ((insn >> 26) & 0x3) {
+ case 0:
+ do_ucf64_datap(env, s, insn);
+ break;
+ case 1:
+ ILLEGAL;
+ break;
+ case 2:
+ do_ucf64_fcvt(env, s, insn);
+ break;
+ case 3:
+ do_ucf64_fcmp(env, s, insn);
+ break;
+ }
+ } else {
+ do_ucf64_trans(env, s, insn);
+ }
+ }
+}
+
+/* Jump to 'dest': chain directly to the next TB when it lies in the same
+ guest page as this one, otherwise set the PC and return to the loop. */
+static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
+{
+ TranslationBlock *tb;
+
+ tb = s->tb;
+ if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+ tcg_gen_goto_tb(n);
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb((long)tb + n);
+ } else {
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+/* Emit an unconditional jump, avoiding TB chaining under single-step. */
+static inline void gen_jmp(DisasContext *s, uint32_t dest)
+{
+ if (unlikely(s->singlestep_enabled)) {
+ /* An indirect jump so that we still trigger the debug exception. */
+ gen_bx_im(s, dest);
+ } else {
+ gen_goto_tb(s, 0, dest);
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+}
+
+/* Signed 16x16 multiply: t0 = sel(t0) * sel(t1), where x/y pick the top
+ (1) or bottom (0) signed halfword of each operand. */
+static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
+{
+ if (x) {
+ tcg_gen_sari_i32(t0, t0, 16);
+ } else {
+ gen_sxth(t0);
+ }
+ if (y) {
+ tcg_gen_sari_i32(t1, t1, 16);
+ } else {
+ gen_sxth(t1);
+ }
+ tcg_gen_mul_i32(t0, t0, t1);
+}
+
+/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
+/* Writes the masked bits of t0 into BSR (when 'bsr' is set, privileged
+ only) or ASR, then forces a TB lookup since CPU state changed. */
+static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
+{
+ TCGv tmp;
+ if (bsr) {
+ /* ??? This is also undefined in system mode. */
+ if (IS_USER(s)) {
+ return 1;
+ }
+
+ tmp = load_cpu_field(bsr);
+ tcg_gen_andi_i32(tmp, tmp, ~mask);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_or_i32(tmp, tmp, t0);
+ store_cpu_field(tmp, bsr);
+ } else {
+ gen_set_asr(t0, mask);
+ }
+ dead_tmp(t0);
+ gen_lookup_tb(s);
+ return 0;
+}
+
+/* Generate an old-style exception return. Marks pc as dead. */
+/* Restores the full ASR from BSR after writing the return address to r31. */
+static void gen_exception_return(DisasContext *s, TCGv pc)
+{
+ TCGv tmp;
+ store_reg(s, 31, pc);
+ tmp = load_cpu_field(bsr);
+ gen_set_asr(tmp, 0xffffffff);
+ dead_tmp(tmp);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+/* Dispatch a coprocessor instruction; only coprocessor 2 (UniCore-F64)
+ is implemented here. */
+static void disas_coproc_insn(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ switch (UCOP_CPNUM) {
+ case 2:
+ disas_ucf64_insn(env, s, insn);
+ break;
+ default:
+ /* Unknown coprocessor. */
+ cpu_abort(env, "Unknown coprocessor!");
+ }
+}
+
+
+/* Store a 64-bit value to a register pair. Clobbers val. */
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
+{
+ TCGv tmp;
+ tmp = new_tmp();
+ tcg_gen_trunc_i64_i32(tmp, val);
+ store_reg(s, rlow, tmp);
+ tmp = new_tmp();
+ tcg_gen_shri_i64(val, val, 32);
+ tcg_gen_trunc_i64_i32(tmp, val);
+ store_reg(s, rhigh, tmp);
+}
+
+/* load and add a 64-bit value from a register pair. */
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
+{
+ TCGv_i64 tmp;
+ TCGv tmpl;
+ TCGv tmph;
+
+ /* Load 64-bit value rd:rn. */
+ tmpl = load_reg(s, rlow);
+ tmph = load_reg(s, rhigh);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+ dead_tmp(tmpl);
+ dead_tmp(tmph);
+ tcg_gen_add_i64(val, val, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* data processing instructions */
+/* Decodes and emits the 16 data-processing opcodes: builds the shifted
+ or immediate second operand, optionally loads REG_N as the first
+ operand, performs the op (flag-setting variants via helpers), and
+ stores or discards the result. MOV/MVN (0x0d/0x0f) may be predicated
+ as CMOV via a conditional skip label. */
+static void do_datap(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv tmp;
+ TCGv tmp2;
+ int logic_cc;
+
+ if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
+ if (UCOP_SET(23)) { /* CMOV instructions */
+ if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
+ ILLEGAL;
+ }
+ /* if not always execute, we generate a conditional jump to
+ next instruction */
+ s->condlabel = gen_new_label();
+ gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+ }
+
+ /* logic ops set N/Z inline; arithmetic ops need the _cc helpers */
+ logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
+
+ if (UCOP_SET(29)) {
+ unsigned int val;
+ /* immediate operand */
+ val = UCOP_IMM_9;
+ if (UCOP_SH_IM) {
+ val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
+ }
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, val);
+ if (logic_cc && UCOP_SH_IM) {
+ gen_set_CF_bit31(tmp2);
+ }
+ } else {
+ /* register */
+ tmp2 = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET(5)) {
+ tmp = load_reg(s, UCOP_REG_S);
+ gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
+ } else {
+ gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
+ }
+ }
+
+ /* MOV/MVN take no first operand */
+ if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
+ tmp = load_reg(s, UCOP_REG_N);
+ } else {
+ TCGV_UNUSED(tmp);
+ }
+
+ switch (UCOP_OPCODES) {
+ case 0x00:
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x01:
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x02:
+ if (UCOP_SET_S && UCOP_REG_D == 31) {
+ /* SUBS r31, ... is used for exception return. */
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_exception_return(s, tmp);
+ } else {
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ }
+ break;
+ case 0x03:
+ /* reverse subtract: REG_D = op2 - REG_N */
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, tmp2, tmp);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x04:
+ if (UCOP_SET_S) {
+ gen_helper_add_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x05:
+ /* add with carry */
+ if (UCOP_SET_S) {
+ gen_helper_adc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_add_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x06:
+ /* subtract with carry */
+ if (UCOP_SET_S) {
+ gen_helper_sbc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_sub_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x07:
+ /* reverse subtract with carry */
+ if (UCOP_SET_S) {
+ gen_helper_sbc_cc(tmp, tmp2, tmp);
+ } else {
+ gen_sub_carry(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x08:
+ /* test (and, flags only) */
+ if (UCOP_SET_S) {
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x09:
+ /* test equivalence (xor, flags only) */
+ if (UCOP_SET_S) {
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0a:
+ /* compare (sub, flags only) */
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0b:
+ /* compare negative (add, flags only) */
+ if (UCOP_SET_S) {
+ gen_helper_add_cc(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0c:
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x0d:
+ if (logic_cc && UCOP_REG_D == 31) {
+ /* MOVS r31, ... is used for exception return. */
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ gen_exception_return(s, tmp2);
+ } else {
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp2);
+ }
+ break;
+ case 0x0e:
+ /* bit clear: REG_N & ~op2 */
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ default:
+ case 0x0f:
+ /* move negated */
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp2);
+ break;
+ }
+ /* tmp2 was consumed by store_reg_bx in the MOV/MVN cases */
+ if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
+ dead_tmp(tmp2);
+ }
+}
+
+/* multiply */
+/* 32x32->64 (bit 27 set; signed when bit 26, accumulate when bit 25)
+ or plain 32-bit multiply with optional accumulate and flag setting. */
+static void do_mult(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv_i64 tmp64;
+
+ if (UCOP_SET(27)) {
+ /* 64 bit mul */
+ tmp = load_reg(s, UCOP_REG_M);
+ tmp2 = load_reg(s, UCOP_REG_N);
+ if (UCOP_SET(26)) {
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ } else {
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
+ }
+ if (UCOP_SET(25)) { /* mult accumulate */
+ gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
+ }
+ gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
+ tcg_temp_free_i64(tmp64);
+ } else {
+ /* 32 bit mul */
+ tmp = load_reg(s, UCOP_REG_M);
+ tmp2 = load_reg(s, UCOP_REG_N);
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (UCOP_SET(25)) {
+ /* Add */
+ tmp2 = load_reg(s, UCOP_REG_S);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ if (UCOP_SET_S) {
+ gen_logic_CC(tmp);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+}
+
+/* Miscellaneous instructions: branch-and-exchange, PSR moves in both
+ * directions, and count-leading-zeros/ones.  Each candidate encoding is
+ * matched by mask in turn; anything left over is illegal. */
+static void do_misc(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int val;
+ TCGv tmp;
+
+ if ((insn & 0xffffffe0) == 0x10ffc120) {
+ /* Trivial implementation equivalent to bx. */
+ tmp = load_reg(s, UCOP_REG_M);
+ gen_bx(s, tmp);
+ return;
+ }
+
+ if ((insn & 0xfbffc000) == 0x30ffc000) {
+ /* PSR = immediate */
+ val = UCOP_IMM_9;
+ if (UCOP_SH_IM) {
+ /* Rotate the immediate right by the shift amount; the guard
+ * above keeps the (32 - shift) out of the UB shift-by-32 case. */
+ val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
+ }
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbffffe0) == 0x12ffc020) {
+ /* PSR.flag = reg: only the NZCV condition flags are written. */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbffffe0) == 0x10ffc020) {
+ /* PSR = reg */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbf83fff) == 0x10f80000) {
+ /* reg = PSR; the B bit selects the banked BSR, which is
+ * privileged-only. */
+ if (UCOP_SET_B) {
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ tmp = load_cpu_field(bsr);
+ } else {
+ tmp = new_tmp();
+ gen_helper_asr_read(tmp);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ return;
+ }
+
+ if ((insn & 0xfbf83fe0) == 0x12f80120) {
+ /* clz / clo: bit 26 selects count-leading-ones. */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET(26)) {
+ gen_helper_clo(tmp, tmp);
+ } else {
+ gen_helper_clz(tmp, tmp);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ return;
+ }
+
+ /* otherwise */
+ ILLEGAL;
+}
+
+/* load/store I_offset and R_offset
+ * Word and unsigned-byte load/store with immediate or register offset.
+ * P selects pre-indexing, W write-back, L load vs. store, B byte vs.
+ * word.  Base write-back is performed before the loaded value is
+ * stored so overlapping index registers behave correctly. */
+static void do_ldst_ir(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int i;
+ TCGv tmp;
+ TCGv tmp2;
+
+ tmp2 = load_reg(s, UCOP_REG_N);
+ /* MMU-index flag for the memory access: user-mode translation is
+ * forced either in user state or for the post-indexed + W form
+ * (the unprivileged-access variant). */
+ i = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
+
+ /* immediate */
+ if (UCOP_SET_P) {
+ gen_add_data_offset(s, insn, tmp2);
+ }
+
+ if (UCOP_SET_L) {
+ /* load */
+ if (UCOP_SET_B) {
+ tmp = gen_ld8u(tmp2, i);
+ } else {
+ tmp = gen_ld32(tmp2, i);
+ }
+ } else {
+ /* store */
+ tmp = load_reg(s, UCOP_REG_D);
+ if (UCOP_SET_B) {
+ gen_st8(tmp, tmp2, i);
+ } else {
+ gen_st32(tmp, tmp2, i);
+ }
+ }
+ if (!UCOP_SET_P) {
+ /* post-indexed: apply the offset and always write back the base */
+ gen_add_data_offset(s, insn, tmp2);
+ store_reg(s, UCOP_REG_N, tmp2);
+ } else if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, tmp2);
+ } else {
+ dead_tmp(tmp2);
+ }
+ if (UCOP_SET_L) {
+ /* Complete the load; a load into r31 (PC) is a branch. */
+ if (UCOP_REG_D == 31) {
+ gen_bx(s, tmp);
+ } else {
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+ }
+}
+
+/* SWP instruction: atomically (from the guest's view) exchanges
+ * REG_M with the memory word/byte at [REG_N], placing the old memory
+ * value in REG_D.  B selects byte vs. word. */
+static void do_swap(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv addr;
+ TCGv tmp;
+ TCGv tmp2;
+
+ if ((insn & 0xff003fe0) != 0x40000120) {
+ ILLEGAL;
+ }
+
+ /* ??? This is not really atomic. However we know
+ we never have multiple CPUs running in parallel,
+ so it is good enough. */
+ addr = load_reg(s, UCOP_REG_N);
+ tmp = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET_B) {
+ tmp2 = gen_ld8u(addr, IS_USER(s));
+ gen_st8(tmp, addr, IS_USER(s));
+ } else {
+ tmp2 = gen_ld32(addr, IS_USER(s));
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ dead_tmp(addr);
+ store_reg(s, UCOP_REG_D, tmp2);
+}
+
+/* load/store halfword / signed byte.
+ * SH_OP 0 is really a SWP encoding and is forwarded to do_swap();
+ * 1 = unsigned halfword, 2 = signed byte load, 3 = signed halfword
+ * load.  Only the halfword form (SH_OP == 1) may be stored. */
+static void do_ldst_hwsb(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv addr;
+ TCGv tmp;
+
+ if (UCOP_SH_OP == 0) {
+ do_swap(env, s, insn);
+ return;
+ }
+
+ addr = load_reg(s, UCOP_REG_N);
+ if (UCOP_SET_P) {
+ gen_add_datah_offset(s, insn, addr);
+ }
+
+ if (UCOP_SET_L) { /* load */
+ switch (UCOP_SH_OP) {
+ case 1:
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ case 2:
+ tmp = gen_ld8s(addr, IS_USER(s));
+ break;
+ default: /* see do_swap */
+ case 3:
+ tmp = gen_ld16s(addr, IS_USER(s));
+ break;
+ }
+ } else { /* store */
+ if (UCOP_SH_OP != 1) {
+ ILLEGAL;
+ }
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_st16(tmp, addr, IS_USER(s));
+ }
+ /* Perform base writeback before the loaded value to
+ ensure correct behavior with overlapping index registers. */
+ if (!UCOP_SET_P) {
+ gen_add_datah_offset(s, insn, addr);
+ store_reg(s, UCOP_REG_N, addr);
+ } else if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ if (UCOP_SET_L) {
+ /* Complete the load. */
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+}
+
+/* load/store multiple words.
+ * The 16-register transfer list is split across insn bits 0-5 and
+ * 9-18; bit 6 selects the upper register bank (r16-r31).  U/P select
+ * increment/decrement and pre/post addressing, W requests base
+ * write-back, L load vs. store.  The B ("S") bit gives the
+ * privileged user-bank / exception-return forms. */
+static void do_ldst_m(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int val, i;
+ int j, n, reg, user, loaded_base;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv addr;
+ TCGv loaded_var;
+
+ if (UCOP_SET(7)) {
+ ILLEGAL;
+ }
+ /* XXX: store correct base if write back */
+ user = 0;
+ if (UCOP_SET_B) { /* S bit in instruction table */
+ if (IS_USER(s)) {
+ ILLEGAL; /* only usable in supervisor mode */
+ }
+ if (UCOP_SET(18) == 0) { /* pc reg */
+ /* PC not in the list: transfer the user-bank registers. */
+ user = 1;
+ }
+ }
+
+ addr = load_reg(s, UCOP_REG_N);
+
+ /* compute total size */
+ loaded_base = 0;
+ TCGV_UNUSED(loaded_var);
+ n = 0;
+ /* count set bits in both halves of the split register list */
+ for (i = 0; i < 6; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+ for (i = 9; i < 19; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+ /* XXX: test invalid n == 0 case ? */
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) {
+ /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* post increment */
+ }
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ } else {
+ /* post decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ }
+ }
+
+ j = 0;
+ reg = UCOP_SET(6) ? 16 : 0;
+ for (i = 0; i < 19; i++, reg++) {
+ if (i == 6) {
+ /* skip the non-list bits 6-8; reg stays contiguous since
+ * the ++ in the for header supplies the single step */
+ i = i + 3;
+ }
+ if (UCOP_SET(i)) {
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, IS_USER(s));
+ if (reg == 31) {
+ gen_bx(s, tmp);
+ } else if (user) {
+ tmp2 = tcg_const_i32(reg);
+ gen_helper_set_user_reg(tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ dead_tmp(tmp);
+ } else if (reg == UCOP_REG_N) {
+ /* base register reload is deferred until after
+ * any write-back below */
+ loaded_var = tmp;
+ loaded_base = 1;
+ } else {
+ store_reg(s, reg, tmp);
+ }
+ } else { /* store */
+ if (reg == 31) {
+ /* special case: r31 = PC + 4 */
+ val = (long)s->pc;
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ } else if (user) {
+ tmp = new_tmp();
+ tmp2 = tcg_const_i32(reg);
+ gen_helper_get_user_reg(tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ tmp = load_reg(s, reg);
+ }
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ j++;
+ /* no need to add after the last transfer */
+ if (j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+ }
+ if (UCOP_SET_W) { /* write back */
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) {
+ /* pre increment */
+ } else {
+ /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ }
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ if (loaded_base) {
+ store_reg(s, UCOP_REG_N, loaded_var);
+ }
+ if (UCOP_SET_B && !user) {
+ /* Restore ASR from BSR. */
+ tmp = load_cpu_field(bsr);
+ gen_set_asr(tmp, 0xffffffff);
+ dead_tmp(tmp);
+ s->is_jmp = DISAS_UPDATE;
+ }
+}
+
+/* branch (and link).
+ * Condition 0xf is illegal, 0xe is "always"; any other condition
+ * emits a conditional skip over the branch.  L stores the return
+ * address (next insn) in r30.  The 24-bit offset is sign-extended
+ * and scaled by 4, relative to PC+4. */
+static void do_branch(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int val;
+ int32_t offset;
+ TCGv tmp;
+
+ if (UCOP_COND == 0xf) {
+ ILLEGAL;
+ }
+
+ if (UCOP_COND != 0xe) {
+ /* if not always execute, we generate a conditional jump to
+ next instruction */
+ s->condlabel = gen_new_label();
+ gen_test_cc(UCOP_COND ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+
+ val = (int32_t)s->pc;
+ if (UCOP_SET_L) {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 30, tmp);
+ }
+ /* sign-extend the low 24 bits of the insn */
+ offset = (((int32_t)insn << 8) >> 8);
+ val += (offset << 2); /* unicore is pc+4 */
+ gen_jmp(s, val);
+}
+
+/* Decode and translate one 4-byte UniCore32 instruction at s->pc,
+ * dispatching on the top three opcode bits. */
+static void disas_uc32_insn(CPUState *env, DisasContext *s)
+{
+ unsigned int insn;
+
+ insn = ldl_code(s->pc);
+ s->pc += 4;
+
+ /* UniCore instructions class:
+ * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
+ * AAA : see switch case
+ * BBBB : opcodes or cond or PUBW
+ * C : S OR L
+ * D : 8
+ * E : 5
+ */
+ switch (insn >> 29) {
+ case 0b000:
+ if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
+ do_mult(env, s, insn);
+ break;
+ }
+
+ if (UCOP_SET(8)) {
+ do_misc(env, s, insn);
+ break;
+ }
+ /* fall through: register-operand data processing */
+ case 0b001:
+ if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
+ do_misc(env, s, insn);
+ break;
+ }
+ do_datap(env, s, insn);
+ break;
+
+ case 0b010:
+ if (UCOP_SET(8) && UCOP_SET(5)) {
+ do_ldst_hwsb(env, s, insn);
+ break;
+ }
+ if (UCOP_SET(8) || UCOP_SET(5)) {
+ ILLEGAL;
+ }
+ /* fall through: word/byte load-store */
+ case 0b011:
+ do_ldst_ir(env, s, insn);
+ break;
+
+ case 0b100:
+ if (UCOP_SET(8)) {
+ ILLEGAL; /* extended instructions */
+ }
+ do_ldst_m(env, s, insn);
+ break;
+ case 0b101:
+ do_branch(env, s, insn);
+ break;
+ case 0b110:
+ /* Coprocessor. */
+ disas_coproc_insn(env, s, insn);
+ break;
+ case 0b111:
+ if (!UCOP_SET(28)) {
+ disas_coproc_insn(env, s, insn);
+ break;
+ }
+ if ((insn & 0xff000000) == 0xff000000) { /* syscall */
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_SYSCALL;
+ break;
+ }
+ ILLEGAL;
+ }
+
+ return;
+}
+
+/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
+ basic block 'tb'. If search_pc is TRUE, also generate PC
+ information for each intermediate instruction. */
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb, int search_pc)
+{
+ DisasContext dc1, *dc = &dc1;
+ CPUBreakpoint *bp;
+ uint16_t *gen_opc_end;
+ int j, lj;
+ target_ulong pc_start;
+ uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
+
+ /* generate intermediate code */
+ num_temps = 0;
+
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = env->singlestep_enabled;
+ dc->condjmp = 0;
+ cpu_F0s = tcg_temp_new_i32();
+ cpu_F1s = tcg_temp_new_i32();
+ cpu_F0d = tcg_temp_new_i64();
+ cpu_F1d = tcg_temp_new_i64();
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ lj = -1;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+
+ gen_icount_start();
+ do {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ gen_set_pc_im(dc->pc);
+ gen_exception(EXCP_DEBUG);
+ dc->is_jmp = DISAS_JUMP;
+ /* Advance PC so that clearing the breakpoint will
+ invalidate this TB.  All UniCore32 instructions
+ are 4 bytes wide. */
+ dc->pc += 4;
+ goto done_generating;
+ }
+ }
+ }
+ if (search_pc) {
+ /* record the opcode-index -> guest-PC mapping, zero-filling
+ any gap left by ops emitted for the previous insn */
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ }
+ gen_opc_pc[lj] = dc->pc;
+ gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
+ }
+
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ disas_uc32_insn(env, dc);
+
+ if (num_temps) {
+ fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
+ num_temps = 0;
+ }
+
+ if (dc->condjmp && !dc->is_jmp) {
+ gen_set_label(dc->condlabel);
+ dc->condjmp = 0;
+ }
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place. */
+ num_insns++;
+ } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
+ !env->singlestep_enabled &&
+ !singlestep &&
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(env, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
+
+ /* At this stage dc->condjmp will only be set when the skipped
+ instruction was a conditional branch or trap, and the PC has
+ already been written. */
+ if (unlikely(env->singlestep_enabled)) {
+ /* Make sure the pc is updated, and raise a debug exception. */
+ if (dc->condjmp) {
+ if (dc->is_jmp == DISAS_SYSCALL) {
+ gen_exception(UC32_EXCP_PRIV);
+ } else {
+ gen_exception(EXCP_DEBUG);
+ }
+ gen_set_label(dc->condlabel);
+ }
+ if (dc->condjmp || !dc->is_jmp) {
+ gen_set_pc_im(dc->pc);
+ dc->condjmp = 0;
+ }
+ if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
+ gen_exception(UC32_EXCP_PRIV);
+ } else {
+ gen_exception(EXCP_DEBUG);
+ }
+ } else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+ the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+ Hardware breakpoints have already been handled and skip this code.
+ */
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ case DISAS_SYSCALL:
+ gen_exception(UC32_EXCP_PRIV);
+ break;
+ }
+ if (dc->condjmp) {
+ gen_set_label(dc->condlabel);
+ gen_goto_tb(dc, 1, dc->pc);
+ dc->condjmp = 0;
+ }
+ }
+
+done_generating:
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, dc->pc - pc_start, 0);
+ qemu_log("\n");
+ }
+#endif
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j) {
+ gen_opc_instr_start[lj++] = 0;
+ }
+ } else {
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+ }
+}
+
+/* Translate one TB without recording PC-search information. */
+void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 0);
+}
+
+/* Translate one TB, additionally recording the opcode-to-PC mapping
+ used to recover the guest PC after an exception. */
+void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(env, tb, 1);
+}
+
+/* Printable names for the 16 processor modes, indexed by the low
+ 4 bits of the PSR (see cpu_dump_state()). */
+static const char *cpu_mode_names[16] = {
+ "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
+ "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
+};
+
+#define UCF64_DUMP_STATE
+/* Dump the 32 general registers, the PSR flags/mode, and (when
+ UCF64_DUMP_STATE is defined) the UCF64 FPU registers to stream f. */
+void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ int i;
+#ifdef UCF64_DUMP_STATE
+ union {
+ uint32_t i;
+ float s;
+ } s0, s1;
+ CPU_DoubleU d;
+ /* ??? This assumes float64 and double have the same layout.
+ Oh well, it's only debug dumps. */
+ union {
+ float64 f64;
+ double d;
+ } d0;
+#endif
+ uint32_t psr;
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+ psr = cpu_asr_read(env);
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
+ psr,
+ psr & (1 << 31) ? 'N' : '-',
+ psr & (1 << 30) ? 'Z' : '-',
+ psr & (1 << 29) ? 'C' : '-',
+ psr & (1 << 28) ? 'V' : '-',
+ cpu_mode_names[psr & 0xf]);
+
+#ifdef UCF64_DUMP_STATE
+ for (i = 0; i < 16; i++) {
+ d.d = env->ucf64.regs[i];
+ s0.i = d.l.lower;
+ s1.i = d.l.upper;
+ d0.f64 = d.d;
+ /* NOTE(review): the (uint64_t) cast assumes float64 is an
+ integer typedef (QEMU's default softfloat configuration);
+ with struct-based float64 this would not compile. */
+ cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%" PRIx64 "(%8g)\n",
+ i * 2, (int)s0.i, s0.s,
+ i * 2 + 1, (int)s1.i, s1.s,
+ i, (uint64_t)d0.f64, d0.d);
+ }
+ cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
+#endif
+}
+
+/* Restore the guest PC (r31) from the gen_opc_pc entry recorded for
+ opcode index pc_pos during a searched translation. */
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ env->regs[31] = gen_opc_pc[pc_pos];
+}