author     j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>   2007-03-20 22:11:31 +0000
committer  j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>   2007-03-20 22:11:31 +0000
commit     0487d6a8b4e15383d0651eea1e4e03ded44308b2
tree       08fa8a944867ee7df1bb258f93eba421aa5a65d3
parent     75d62a585629cdc1ae0d530189653cb1d8d9c53c
PowerPC 2.03 SPE extension - first pass.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2519 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r--  target-ppc/cpu.h            |  82
-rw-r--r--  target-ppc/exec.h           |   6
-rw-r--r--  target-ppc/op.c             | 815
-rw-r--r--  target-ppc/op_helper.c      | 869
-rw-r--r--  target-ppc/op_helper.h      | 281
-rw-r--r--  target-ppc/op_mem.h         | 274
-rw-r--r--  target-ppc/op_template.h    |  42
-rw-r--r--  target-ppc/translate.c      | 822
-rw-r--r--  target-ppc/translate_init.c |   5
9 files changed, 3050 insertions(+), 146 deletions(-)
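The model the patch implements: SPE treats each 64-bit GPR as a vector of two 32-bit elements, so most ev* helpers split the register, apply a 32-bit primitive to each half, and recombine. The standalone sketch below (editorial, plain C, not part of the commit; the names eaddw_one and evaddw here are illustrative) shows the shape that the DO_SPE_OP2 macro in op_helper.c expands to for evaddw:

/* Sketch only: the split/operate/recombine pattern used by the
 * DO_SPE_OP2 macro in op_helper.c.  Each 64-bit GPR holds two
 * independent 32-bit elements. */
#include <stdint.h>
#include <stdio.h>

static uint32_t eaddw_one(uint32_t a, uint32_t b)
{
    return a + b;                      /* per-element add, modulo 2^32 */
}

static uint64_t evaddw(uint64_t ra, uint64_t rb)
{
    uint32_t hi = eaddw_one(ra >> 32, rb >> 32);          /* upper word */
    uint32_t lo = eaddw_one((uint32_t)ra, (uint32_t)rb);  /* lower word */
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t r = evaddw(0x0000000100000002ULL, 0x0000000300000004ULL);
    printf("%016llx\n", (unsigned long long)r);  /* prints 0000000400000006 */
    return 0;
}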
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 5b1af41..e229413 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -26,15 +26,20 @@ #if defined (TARGET_PPC64) typedef uint64_t ppc_gpr_t; #define TARGET_LONG_BITS 64 +#define TARGET_GPR_BITS 64 #define REGX "%016" PRIx64 -#elif defined(TARGET_E500) +/* We can safely use PowerPC SPE extension when compiling PowerPC 64 */ +#define TARGET_PPCSPE +#elif defined(TARGET_PPCSPE) /* GPR are 64 bits: used by vector extension */ typedef uint64_t ppc_gpr_t; #define TARGET_LONG_BITS 32 +#define TARGET_GPR_BITS 64 #define REGX "%08" PRIx32 #else typedef uint32_t ppc_gpr_t; #define TARGET_LONG_BITS 32 +#define TARGET_GPR_BITS 32 #define REGX "%08" PRIx32 #endif @@ -297,7 +302,7 @@ enum { /* ld/st with reservation instructions */ /* cache control instructions */ /* spr/msr access instructions */ - PPC_INSNS_BASE = 0x00000001, + PPC_INSNS_BASE = 0x0000000000000001ULL, #define PPC_INTEGER PPC_INSNS_BASE #define PPC_FLOW PPC_INSNS_BASE #define PPC_MEM PPC_INSNS_BASE @@ -305,68 +310,72 @@ enum { #define PPC_CACHE PPC_INSNS_BASE #define PPC_MISC PPC_INSNS_BASE /* floating point operations instructions */ - PPC_FLOAT = 0x00000002, + PPC_FLOAT = 0x0000000000000002ULL, /* more floating point operations instructions */ - PPC_FLOAT_EXT = 0x00000004, + PPC_FLOAT_EXT = 0x0000000000000004ULL, /* external control instructions */ - PPC_EXTERN = 0x00000008, + PPC_EXTERN = 0x0000000000000008ULL, /* segment register access instructions */ - PPC_SEGMENT = 0x00000010, + PPC_SEGMENT = 0x0000000000000010ULL, /* Optional cache control instructions */ - PPC_CACHE_OPT = 0x00000020, + PPC_CACHE_OPT = 0x0000000000000020ULL, /* Optional floating point op instructions */ - PPC_FLOAT_OPT = 0x00000040, + PPC_FLOAT_OPT = 0x0000000000000040ULL, /* Optional memory control instructions */ - PPC_MEM_TLBIA = 0x00000080, - PPC_MEM_TLBIE = 0x00000100, - PPC_MEM_TLBSYNC = 0x00000200, + PPC_MEM_TLBIA = 0x0000000000000080ULL, + PPC_MEM_TLBIE = 0x0000000000000100ULL, + PPC_MEM_TLBSYNC = 0x0000000000000200ULL, /* eieio & sync */ - PPC_MEM_SYNC = 0x00000400, + PPC_MEM_SYNC = 0x0000000000000400ULL, /* PowerPC 6xx TLB management instructions */ - PPC_6xx_TLB = 0x00000800, + PPC_6xx_TLB = 0x0000000000000800ULL, /* Altivec support */ - PPC_ALTIVEC = 0x00001000, + PPC_ALTIVEC = 0x0000000000001000ULL, /* Time base support */ - PPC_TB = 0x00002000, + PPC_TB = 0x0000000000002000ULL, /* Embedded PowerPC dedicated instructions */ - PPC_EMB_COMMON = 0x00004000, + PPC_EMB_COMMON = 0x0000000000004000ULL, /* PowerPC 40x exception model */ - PPC_40x_EXCP = 0x00008000, + PPC_40x_EXCP = 0x0000000000008000ULL, /* PowerPC 40x specific instructions */ - PPC_40x_SPEC = 0x00010000, + PPC_40x_SPEC = 0x0000000000010000ULL, /* PowerPC 405 Mac instructions */ - PPC_405_MAC = 0x00020000, + PPC_405_MAC = 0x0000000000020000ULL, /* PowerPC 440 specific instructions */ - PPC_440_SPEC = 0x00040000, + PPC_440_SPEC = 0x0000000000040000ULL, /* Specific extensions */ /* Power-to-PowerPC bridge (601) */ - PPC_POWER_BR = 0x00080000, + PPC_POWER_BR = 0x0000000000080000ULL, /* PowerPC 602 specific */ - PPC_602_SPEC = 0x00100000, + PPC_602_SPEC = 0x0000000000100000ULL, /* Deprecated instructions */ /* Original POWER instruction set */ - PPC_POWER = 0x00200000, + PPC_POWER = 0x0000000000200000ULL, /* POWER2 instruction set extension */ - PPC_POWER2 = 0x00400000, + PPC_POWER2 = 0x0000000000400000ULL, /* Power RTC support */ - PPC_POWER_RTC = 0x00800000, + PPC_POWER_RTC = 0x0000000000800000ULL, /* 64 bits PowerPC instructions */ 
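The cpu.h hunk above widens every instruction-type flag from a 32-bit constant to a 64-bit ULL constant: bit 31 was already taken by PPC_203, and the new PPC_SPE and PPC_SPEFPU flags need bits 32 and 33. The same change ripples into translate.c later in this diff, where the type field of opc_handler_t grows from uint32_t to uint64_t. A minimal sketch of the overflow being avoided (editorial; insns_supported and cpu_flags are made-up names, the flag values are taken from the hunk):

/* Sketch only: a 32-bit flag word is full once bit 31 is used, so the
 * constants gain the ULL suffix and their holders widen to uint64_t. */
#include <stdint.h>

#define PPC_203    0x0000000080000000ULL  /* bit 31: last flag a 32-bit word holds */
#define PPC_SPE    0x0000000100000000ULL  /* bit 32: needs 64-bit flags */
#define PPC_SPEFPU 0x0000000200000000ULL  /* bit 33 */

static int insns_supported(uint64_t cpu_flags, uint64_t required)
{
    return (cpu_flags & required) == required;
}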
/* 64 bits PowerPC instruction set */ - PPC_64B = 0x01000000, + PPC_64B = 0x0000000001000000ULL, /* 64 bits hypervisor extensions */ - PPC_64H = 0x02000000, + PPC_64H = 0x0000000002000000ULL, /* 64 bits PowerPC "bridge" features */ - PPC_64_BRIDGE = 0x04000000, + PPC_64_BRIDGE = 0x0000000004000000ULL, /* BookE (embedded) PowerPC specification */ - PPC_BOOKE = 0x08000000, + PPC_BOOKE = 0x0000000008000000ULL, /* eieio */ - PPC_MEM_EIEIO = 0x10000000, + PPC_MEM_EIEIO = 0x0000000010000000ULL, /* e500 vector instructions */ - PPC_E500_VECTOR = 0x20000000, + PPC_E500_VECTOR = 0x0000000020000000ULL, /* PowerPC 4xx dedicated instructions */ - PPC_4xx_COMMON = 0x40000000, + PPC_4xx_COMMON = 0x0000000040000000ULL, /* PowerPC 2.03 specification extensions */ - PPC_203 = 0x80000000, + PPC_203 = 0x0000000080000000ULL, + /* PowerPC 2.03 SPE extension */ + PPC_SPE = 0x0000000100000000ULL, + /* PowerPC 2.03 SPE floating-point extension */ + PPC_SPEFPU = 0x0000000200000000ULL, }; /* CPU run-time flags (MMU and exception model) */ @@ -618,10 +627,10 @@ struct CPUPPCState { /* First are the most commonly used resources * during translated code execution */ -#if TARGET_LONG_BITS > HOST_LONG_BITS +#if TARGET_GPR_BITS > HOST_LONG_BITS /* temporary fixed-point registers * used to emulate 64 bits target on 32 bits hosts - */ + */ target_ulong t0, t1, t2; #endif ppc_avr_t t0_avr, t1_avr, t2_avr; @@ -683,6 +692,7 @@ struct CPUPPCState { uint32_t vscr; /* SPE registers */ ppc_gpr_t spe_acc; + float_status spe_status; uint32_t spe_fscr; /* Internal devices resources */ @@ -1192,6 +1202,8 @@ enum { #define EXCP_970_MAINT 0x1600 /* Maintenance exception */ #define EXCP_970_THRM 0x1800 /* Thermal exception */ #define EXCP_970_VPUA 0x1700 /* VPU assist exception */ +/* SPE related exceptions */ +#define EXCP_NO_SPE 0x0F20 /* SPE unavailable exception */ /* End of exception vectors area */ #define EXCP_PPC_MAX 0x4000 /* Qemu exceptions: special cases we want to stop translation */ diff --git a/target-ppc/exec.h b/target-ppc/exec.h index f1dde82..a10b62d 100644 --- a/target-ppc/exec.h +++ b/target-ppc/exec.h @@ -39,10 +39,10 @@ register unsigned long T1 asm(AREG2); register unsigned long T2 asm(AREG3); #endif /* We may, sometime, need 64 bits registers on 32 bits target */ -#if defined(TARGET_PPC64) || (HOST_LONG_BITS == 64) +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE) || (HOST_LONG_BITS == 64) #define T0_64 T0 -#define T1_64 T0 -#define T2_64 T0 +#define T1_64 T1 +#define T2_64 T2 #else /* no registers can be used */ #define T0_64 (env->t0) diff --git a/target-ppc/op.c b/target-ppc/op.c index 9531595..0b972c8 100644 --- a/target-ppc/op.c +++ b/target-ppc/op.c @@ -1326,106 +1326,14 @@ void OPPROTO op_andi_T1 (void) /* count leading zero */ void OPPROTO op_cntlzw (void) { - int cnt; - - cnt = 0; - if (!(T0 & 0xFFFF0000UL)) { - cnt += 16; - T0 <<= 16; - } - if (!(T0 & 0xFF000000UL)) { - cnt += 8; - T0 <<= 8; - } - if (!(T0 & 0xF0000000UL)) { - cnt += 4; - T0 <<= 4; - } - if (!(T0 & 0xC0000000UL)) { - cnt += 2; - T0 <<= 2; - } - if (!(T0 & 0x80000000UL)) { - cnt++; - T0 <<= 1; - } - if (!(T0 & 0x80000000UL)) { - cnt++; - } - T0 = cnt; + T0 = _do_cntlzw(T0); RETURN(); } #if defined(TARGET_PPC64) void OPPROTO op_cntlzd (void) { -#if HOST_LONG_BITS == 64 - int cnt; - - cnt = 0; - if (!(T0 & 0xFFFFFFFF00000000ULL)) { - cnt += 32; - T0 <<= 32; - } - if (!(T0 & 0xFFFF000000000000ULL)) { - cnt += 16; - T0 <<= 16; - } - if (!(T0 & 0xFF00000000000000ULL)) { - cnt += 8; - T0 <<= 8; - } - if (!(T0 & 0xF000000000000000ULL)) { - 
cnt += 4; - T0 <<= 4; - } - if (!(T0 & 0xC000000000000000ULL)) { - cnt += 2; - T0 <<= 2; - } - if (!(T0 & 0x8000000000000000ULL)) { - cnt++; - T0 <<= 1; - } - if (!(T0 & 0x8000000000000000ULL)) { - cnt++; - } - T0 = cnt; -#else - uint32_t tmp; - - /* Make it easier on 32 bits host machines */ - if (!(T0 >> 32)) { - tmp = T0; - T0 = 32; - } else { - tmp = T0 >> 32; - T0 = 0; - } - if (!(tmp & 0xFFFF0000UL)) { - T0 += 16; - tmp <<= 16; - } - if (!(tmp & 0xFF000000UL)) { - T0 += 8; - tmp <<= 8; - } - if (!(tmp & 0xF0000000UL)) { - T0 += 4; - tmp <<= 4; - } - if (!(tmp & 0xC0000000UL)) { - T0 += 2; - tmp <<= 2; - } - if (!(tmp & 0x80000000UL)) { - T0++; - tmp <<= 1; - } - if (!(tmp & 0x80000000UL)) { - T0++; - } -#endif + T0 = _do_cntlzd(T0); RETURN(); } #endif @@ -2462,4 +2370,723 @@ void OPPROTO op_store_booke_tsr (void) store_booke_tsr(env, T0); RETURN(); } + #endif /* !defined(CONFIG_USER_ONLY) */ + +#if defined(TARGET_PPCSPE) +/* SPE extension */ +void OPPROTO op_splatw_T1_64 (void) +{ + T1_64 = (T1_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL); +} + +void OPPROTO op_splatwi_T0_64 (void) +{ + uint64_t tmp = PARAM1; + + T0_64 = (tmp << 32) | tmp; +} + +void OPPROTO op_splatwi_T1_64 (void) +{ + uint64_t tmp = PARAM1; + + T1_64 = (tmp << 32) | tmp; +} + +void OPPROTO op_extsh_T1_64 (void) +{ + T1_64 = (int32_t)((int16_t)T1_64); + RETURN(); +} + +void OPPROTO op_sli16_T1_64 (void) +{ + T1_64 = T1_64 << 16; + RETURN(); +} + +void OPPROTO op_sli32_T1_64 (void) +{ + T1_64 = T1_64 << 32; + RETURN(); +} + +void OPPROTO op_srli32_T1_64 (void) +{ + T1_64 = T1_64 >> 32; + RETURN(); +} + +void OPPROTO op_evsel (void) +{ + do_evsel(); + RETURN(); +} + +void OPPROTO op_evaddw (void) +{ + do_evaddw(); + RETURN(); +} + +void OPPROTO op_evsubfw (void) +{ + do_evsubfw(); + RETURN(); +} + +void OPPROTO op_evneg (void) +{ + do_evneg(); + RETURN(); +} + +void OPPROTO op_evabs (void) +{ + do_evabs(); + RETURN(); +} + +void OPPROTO op_evextsh (void) +{ + T0_64 = ((uint64_t)((int32_t)(int16_t)(T0_64 >> 32)) << 32) | + (uint64_t)((int32_t)(int16_t)T0_64); + RETURN(); +} + +void OPPROTO op_evextsb (void) +{ + T0_64 = ((uint64_t)((int32_t)(int8_t)(T0_64 >> 32)) << 32) | + (uint64_t)((int32_t)(int8_t)T0_64); + RETURN(); +} + +void OPPROTO op_evcntlzw (void) +{ + do_evcntlzw(); + RETURN(); +} + +void OPPROTO op_evrndw (void) +{ + do_evrndw(); + RETURN(); +} + +void OPPROTO op_brinc (void) +{ + do_brinc(); + RETURN(); +} + +void OPPROTO op_evcntlsw (void) +{ + do_evcntlsw(); + RETURN(); +} + +void OPPROTO op_evand (void) +{ + T0_64 &= T1_64; + RETURN(); +} + +void OPPROTO op_evandc (void) +{ + T0_64 &= ~T1_64; + RETURN(); +} + +void OPPROTO op_evor (void) +{ + T0_64 |= T1_64; + RETURN(); +} + +void OPPROTO op_evxor (void) +{ + T0_64 ^= T1_64; + RETURN(); +} + +void OPPROTO op_eveqv (void) +{ + T0_64 = ~(T0_64 ^ T1_64); + RETURN(); +} + +void OPPROTO op_evnor (void) +{ + T0_64 = ~(T0_64 | T1_64); + RETURN(); +} + +void OPPROTO op_evorc (void) +{ + T0_64 |= ~T1_64; + RETURN(); +} + +void OPPROTO op_evnand (void) +{ + T0_64 = ~(T0_64 & T1_64); + RETURN(); +} + +void OPPROTO op_evsrws (void) +{ + do_evsrws(); + RETURN(); +} + +void OPPROTO op_evsrwu (void) +{ + do_evsrwu(); + RETURN(); +} + +void OPPROTO op_evslw (void) +{ + do_evslw(); + RETURN(); +} + +void OPPROTO op_evrlw (void) +{ + do_evrlw(); + RETURN(); +} + +void OPPROTO op_evmergelo (void) +{ + T0_64 = (T0_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL); + RETURN(); +} + +void OPPROTO op_evmergehi (void) +{ + T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 >> 32); + 
RETURN(); +} + +void OPPROTO op_evmergelohi (void) +{ + T0_64 = (T0_64 << 32) | (T1_64 >> 32); + RETURN(); +} + +void OPPROTO op_evmergehilo (void) +{ + T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 & 0x00000000FFFFFFFFULL); + RETURN(); +} + +void OPPROTO op_evcmpgts (void) +{ + do_evcmpgts(); + RETURN(); +} + +void OPPROTO op_evcmpgtu (void) +{ + do_evcmpgtu(); + RETURN(); +} + +void OPPROTO op_evcmplts (void) +{ + do_evcmplts(); + RETURN(); +} + +void OPPROTO op_evcmpltu (void) +{ + do_evcmpltu(); + RETURN(); +} + +void OPPROTO op_evcmpeq (void) +{ + do_evcmpeq(); + RETURN(); +} + +void OPPROTO op_evfssub (void) +{ + do_evfssub(); + RETURN(); +} + +void OPPROTO op_evfsadd (void) +{ + do_evfsadd(); + RETURN(); +} + +void OPPROTO op_evfsnabs (void) +{ + do_evfsnabs(); + RETURN(); +} + +void OPPROTO op_evfsabs (void) +{ + do_evfsabs(); + RETURN(); +} + +void OPPROTO op_evfsneg (void) +{ + do_evfsneg(); + RETURN(); +} + +void OPPROTO op_evfsdiv (void) +{ + do_evfsdiv(); + RETURN(); +} + +void OPPROTO op_evfsmul (void) +{ + do_evfsmul(); + RETURN(); +} + +void OPPROTO op_evfscmplt (void) +{ + do_evfscmplt(); + RETURN(); +} + +void OPPROTO op_evfscmpgt (void) +{ + do_evfscmpgt(); + RETURN(); +} + +void OPPROTO op_evfscmpeq (void) +{ + do_evfscmpeq(); + RETURN(); +} + +void OPPROTO op_evfscfsi (void) +{ + do_evfscfsi(); + RETURN(); +} + +void OPPROTO op_evfscfui (void) +{ + do_evfscfui(); + RETURN(); +} + +void OPPROTO op_evfscfsf (void) +{ + do_evfscfsf(); + RETURN(); +} + +void OPPROTO op_evfscfuf (void) +{ + do_evfscfuf(); + RETURN(); +} + +void OPPROTO op_evfsctsi (void) +{ + do_evfsctsi(); + RETURN(); +} + +void OPPROTO op_evfsctui (void) +{ + do_evfsctui(); + RETURN(); +} + +void OPPROTO op_evfsctsf (void) +{ + do_evfsctsf(); + RETURN(); +} + +void OPPROTO op_evfsctuf (void) +{ + do_evfsctuf(); + RETURN(); +} + +void OPPROTO op_evfsctuiz (void) +{ + do_evfsctuiz(); + RETURN(); +} + +void OPPROTO op_evfsctsiz (void) +{ + do_evfsctsiz(); + RETURN(); +} + +void OPPROTO op_evfststlt (void) +{ + do_evfststlt(); + RETURN(); +} + +void OPPROTO op_evfststgt (void) +{ + do_evfststgt(); + RETURN(); +} + +void OPPROTO op_evfststeq (void) +{ + do_evfststeq(); + RETURN(); +} + +void OPPROTO op_efssub (void) +{ + T0_64 = _do_efssub(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efsadd (void) +{ + T0_64 = _do_efsadd(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efsnabs (void) +{ + T0_64 = _do_efsnabs(T0_64); + RETURN(); +} + +void OPPROTO op_efsabs (void) +{ + T0_64 = _do_efsabs(T0_64); + RETURN(); +} + +void OPPROTO op_efsneg (void) +{ + T0_64 = _do_efsneg(T0_64); + RETURN(); +} + +void OPPROTO op_efsdiv (void) +{ + T0_64 = _do_efsdiv(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efsmul (void) +{ + T0_64 = _do_efsmul(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efscmplt (void) +{ + do_efscmplt(); + RETURN(); +} + +void OPPROTO op_efscmpgt (void) +{ + do_efscmpgt(); + RETURN(); +} + +void OPPROTO op_efscfd (void) +{ + do_efscfd(); + RETURN(); +} + +void OPPROTO op_efscmpeq (void) +{ + do_efscmpeq(); + RETURN(); +} + +void OPPROTO op_efscfsi (void) +{ + do_efscfsi(); + RETURN(); +} + +void OPPROTO op_efscfui (void) +{ + do_efscfui(); + RETURN(); +} + +void OPPROTO op_efscfsf (void) +{ + do_efscfsf(); + RETURN(); +} + +void OPPROTO op_efscfuf (void) +{ + do_efscfuf(); + RETURN(); +} + +void OPPROTO op_efsctsi (void) +{ + do_efsctsi(); + RETURN(); +} + +void OPPROTO op_efsctui (void) +{ + do_efsctui(); + RETURN(); +} + +void OPPROTO op_efsctsf (void) +{ + do_efsctsf(); + RETURN(); +} + +void 
OPPROTO op_efsctuf (void) +{ + do_efsctuf(); + RETURN(); +} + +void OPPROTO op_efsctsiz (void) +{ + do_efsctsiz(); + RETURN(); +} + +void OPPROTO op_efsctuiz (void) +{ + do_efsctuiz(); + RETURN(); +} + +void OPPROTO op_efststlt (void) +{ + T0 = _do_efststlt(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efststgt (void) +{ + T0 = _do_efststgt(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efststeq (void) +{ + T0 = _do_efststeq(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efdsub (void) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = T0_64; + u2.u = T1_64; + u1.f = float64_sub(u1.f, u2.f, &env->spe_status); + T0_64 = u1.u; + RETURN(); +} + +void OPPROTO op_efdadd (void) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = T0_64; + u2.u = T1_64; + u1.f = float64_add(u1.f, u2.f, &env->spe_status); + T0_64 = u1.u; + RETURN(); +} + +void OPPROTO op_efdcfsid (void) +{ + do_efdcfsi(); + RETURN(); +} + +void OPPROTO op_efdcfuid (void) +{ + do_efdcfui(); + RETURN(); +} + +void OPPROTO op_efdnabs (void) +{ + T0_64 |= 0x8000000000000000ULL; + RETURN(); +} + +void OPPROTO op_efdabs (void) +{ + T0_64 &= ~0x8000000000000000ULL; + RETURN(); +} + +void OPPROTO op_efdneg (void) +{ + T0_64 ^= 0x8000000000000000ULL; + RETURN(); +} + +void OPPROTO op_efddiv (void) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = T0_64; + u2.u = T1_64; + u1.f = float64_div(u1.f, u2.f, &env->spe_status); + T0_64 = u1.u; + RETURN(); +} + +void OPPROTO op_efdmul (void) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = T0_64; + u2.u = T1_64; + u1.f = float64_mul(u1.f, u2.f, &env->spe_status); + T0_64 = u1.u; + RETURN(); +} + +void OPPROTO op_efdctsidz (void) +{ + do_efdctsiz(); + RETURN(); +} + +void OPPROTO op_efdctuidz (void) +{ + do_efdctuiz(); + RETURN(); +} + +void OPPROTO op_efdcmplt (void) +{ + do_efdcmplt(); + RETURN(); +} + +void OPPROTO op_efdcmpgt (void) +{ + do_efdcmpgt(); + RETURN(); +} + +void OPPROTO op_efdcfs (void) +{ + do_efdcfs(); + RETURN(); +} + +void OPPROTO op_efdcmpeq (void) +{ + do_efdcmpeq(); + RETURN(); +} + +void OPPROTO op_efdcfsi (void) +{ + do_efdcfsi(); + RETURN(); +} + +void OPPROTO op_efdcfui (void) +{ + do_efdcfui(); + RETURN(); +} + +void OPPROTO op_efdcfsf (void) +{ + do_efdcfsf(); + RETURN(); +} + +void OPPROTO op_efdcfuf (void) +{ + do_efdcfuf(); + RETURN(); +} + +void OPPROTO op_efdctsi (void) +{ + do_efdctsi(); + RETURN(); +} + +void OPPROTO op_efdctui (void) +{ + do_efdctui(); + RETURN(); +} + +void OPPROTO op_efdctsf (void) +{ + do_efdctsf(); + RETURN(); +} + +void OPPROTO op_efdctuf (void) +{ + do_efdctuf(); + RETURN(); +} + +void OPPROTO op_efdctuiz (void) +{ + do_efdctuiz(); + RETURN(); +} + +void OPPROTO op_efdctsiz (void) +{ + do_efdctsiz(); + RETURN(); +} + +void OPPROTO op_efdtstlt (void) +{ + T0 = _do_efdtstlt(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efdtstgt (void) +{ + T0 = _do_efdtstgt(T0_64, T1_64); + RETURN(); +} + +void OPPROTO op_efdtsteq (void) +{ + T0 = _do_efdtsteq(T0_64, T1_64); + RETURN(); +} +#endif /* defined(TARGET_PPCSPE) */ diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c index 7ef06b2..756d164 100644 --- a/target-ppc/op_helper.c +++ b/target-ppc/op_helper.c @@ -19,12 +19,17 @@ */ #include "exec.h" +#include "op_helper.h" + #define MEMSUFFIX _raw +#include "op_helper.h" #include "op_helper_mem.h" #if !defined(CONFIG_USER_ONLY) #define MEMSUFFIX _user +#include "op_helper.h" #include "op_helper_mem.h" #define MEMSUFFIX _kernel +#include "op_helper.h" #include "op_helper_mem.h" #endif @@ -229,7 
+234,7 @@ void do_mul64 (uint64_t *plow, uint64_t *phigh) mul64(plow, phigh, T0, T1); } -static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) +static void imul64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) { int sa, sb; sa = (a < 0); @@ -1119,6 +1124,868 @@ void do_440_dlmzb (void) T0 = i; } +#if defined(TARGET_PPCSPE) +/* SPE extension helpers */ +/* Use a table to make this quicker */ +static uint8_t hbrev[16] = { + 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE, + 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF, +}; + +static inline uint8_t byte_reverse (uint8_t val) +{ + return hbrev[val >> 4] | (hbrev[val & 0xF] << 4); +} + +static inline uint32_t word_reverse (uint32_t val) +{ + return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) | + (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24); +} + +#define MASKBITS 16 // Random value - to be fixed +void do_brinc (void) +{ + uint32_t a, b, d, mask; + + mask = (uint32_t)(-1UL) >> MASKBITS; + b = T1_64 & mask; + a = T0_64 & mask; + d = word_reverse(1 + word_reverse(a | ~mask)); + T0_64 = (T0_64 & ~mask) | (d & mask); +} + +#define DO_SPE_OP2(name) \ +void do_ev##name (void) \ +{ \ + T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \ + (uint64_t)_do_e##name(T0_64, T1_64); \ +} + +#define DO_SPE_OP1(name) \ +void do_ev##name (void) \ +{ \ + T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \ + (uint64_t)_do_e##name(T0_64); \ +} + +/* Fixed-point vector arithmetic */ +static inline uint32_t _do_eabs (uint32_t val) +{ + if (val != 0x80000000) + val &= ~0x80000000; + + return val; +} + +static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2) +{ + return op1 + op2; +} + +static inline int _do_ecntlsw (uint32_t val) +{ + if (val & 0x80000000) + return _do_cntlzw(~val); + else + return _do_cntlzw(val); +} + +static inline int _do_ecntlzw (uint32_t val) +{ + return _do_cntlzw(val); +} + +static inline uint32_t _do_eneg (uint32_t val) +{ + if (val != 0x80000000) + val ^= 0x80000000; + + return val; +} + +static inline uint32_t _do_erlw (uint32_t op1, uint32_t op2) +{ + return rotl32(op1, op2); +} + +static inline uint32_t _do_erndw (uint32_t val) +{ + return (val + 0x000080000000) & 0xFFFF0000; +} + +static inline uint32_t _do_eslw (uint32_t op1, uint32_t op2) +{ + /* No error here: 6 bits are used */ + return op1 << (op2 & 0x3F); +} + +static inline int32_t _do_esrws (int32_t op1, uint32_t op2) +{ + /* No error here: 6 bits are used */ + return op1 >> (op2 & 0x3F); +} + +static inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2) +{ + /* No error here: 6 bits are used */ + return op1 >> (op2 & 0x3F); +} + +static inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2) +{ + return op2 - op1; +} + +/* evabs */ +DO_SPE_OP1(abs); +/* evaddw */ +DO_SPE_OP2(addw); +/* evcntlsw */ +DO_SPE_OP1(cntlsw); +/* evcntlzw */ +DO_SPE_OP1(cntlzw); +/* evneg */ +DO_SPE_OP1(neg); +/* evrlw */ +DO_SPE_OP2(rlw); +/* evrnd */ +DO_SPE_OP1(rndw); +/* evslw */ +DO_SPE_OP2(slw); +/* evsrws */ +DO_SPE_OP2(srws); +/* evsrwu */ +DO_SPE_OP2(srwu); +/* evsubfw */ +DO_SPE_OP2(subfw); + +/* evsel is a little bit more complicated... 
*/ +static inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n) +{ + if (n) + return op1; + else + return op2; +} + +void do_evsel (void) +{ + T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) | + (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1); +} + +/* Fixed-point vector comparisons */ +#define DO_SPE_CMP(name) \ +void do_ev##name (void) \ +{ \ + T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \ + T1_64 >> 32) << 32, \ + _do_e##name(T0_64, T1_64)); \ +} + +static inline uint32_t _do_evcmp_merge (int t0, int t1) +{ + return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1); +} +static inline int _do_ecmpeq (uint32_t op1, uint32_t op2) +{ + return op1 == op2 ? 1 : 0; +} + +static inline int _do_ecmpgts (int32_t op1, int32_t op2) +{ + return op1 > op2 ? 1 : 0; +} + +static inline int _do_ecmpgtu (uint32_t op1, uint32_t op2) +{ + return op1 > op2 ? 1 : 0; +} + +static inline int _do_ecmplts (int32_t op1, int32_t op2) +{ + return op1 < op2 ? 1 : 0; +} + +static inline int _do_ecmpltu (uint32_t op1, uint32_t op2) +{ + return op1 < op2 ? 1 : 0; +} + +/* evcmpeq */ +DO_SPE_CMP(cmpeq); +/* evcmpgts */ +DO_SPE_CMP(cmpgts); +/* evcmpgtu */ +DO_SPE_CMP(cmpgtu); +/* evcmplts */ +DO_SPE_CMP(cmplts); +/* evcmpltu */ +DO_SPE_CMP(cmpltu); + +/* Single precision floating-point conversions from/to integer */ +static inline uint32_t _do_efscfsi (int32_t val) +{ + union { + uint32_t u; + float32 f; + } u; + + u.f = int32_to_float32(val, &env->spe_status); + + return u.u; +} + +static inline uint32_t _do_efscfui (uint32_t val) +{ + union { + uint32_t u; + float32 f; + } u; + + u.f = uint32_to_float32(val, &env->spe_status); + + return u.u; +} + +static inline int32_t _do_efsctsi (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float32_to_int32(u.f, &env->spe_status); +} + +static inline uint32_t _do_efsctui (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float32_to_uint32(u.f, &env->spe_status); +} + +static inline int32_t _do_efsctsiz (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float32_to_int32_round_to_zero(u.f, &env->spe_status); +} + +static inline uint32_t _do_efsctuiz (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float32_to_uint32_round_to_zero(u.f, &env->spe_status); +} + +void do_efscfsi (void) +{ + T0_64 = _do_efscfsi(T0_64); +} + +void do_efscfui (void) +{ + T0_64 = _do_efscfui(T0_64); +} + +void do_efsctsi (void) +{ + T0_64 = _do_efsctsi(T0_64); +} + +void do_efsctui (void) +{ + T0_64 = _do_efsctui(T0_64); +} + +void do_efsctsiz (void) +{ + T0_64 = _do_efsctsiz(T0_64); +} + +void do_efsctuiz (void) +{ + T0_64 = _do_efsctuiz(T0_64); +} + +/* Single precision floating-point conversion to/from fractional */ +static inline uint32_t _do_efscfsf (uint32_t val) +{ + union { + uint32_t u; + float32 f; + } u; + float32 tmp; + + u.f = int32_to_float32(val, &env->spe_status); + tmp = int64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_div(u.f, tmp, &env->spe_status); + + return u.u; +} + +static inline uint32_t _do_efscfuf 
(uint32_t val) +{ + union { + uint32_t u; + float32 f; + } u; + float32 tmp; + + u.f = uint32_to_float32(val, &env->spe_status); + tmp = uint64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_div(u.f, tmp, &env->spe_status); + + return u.u; +} + +static inline int32_t _do_efsctsf (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + float32 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_mul(u.f, tmp, &env->spe_status); + + return float32_to_int32(u.f, &env->spe_status); +} + +static inline uint32_t _do_efsctuf (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + float32 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_mul(u.f, tmp, &env->spe_status); + + return float32_to_uint32(u.f, &env->spe_status); +} + +static inline int32_t _do_efsctsfz (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + float32 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_mul(u.f, tmp, &env->spe_status); + + return float32_to_int32_round_to_zero(u.f, &env->spe_status); +} + +static inline uint32_t _do_efsctufz (uint32_t val) +{ + union { + int32_t u; + float32 f; + } u; + float32 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float32(1ULL << 32, &env->spe_status); + u.f = float32_mul(u.f, tmp, &env->spe_status); + + return float32_to_uint32_round_to_zero(u.f, &env->spe_status); +} + +void do_efscfsf (void) +{ + T0_64 = _do_efscfsf(T0_64); +} + +void do_efscfuf (void) +{ + T0_64 = _do_efscfuf(T0_64); +} + +void do_efsctsf (void) +{ + T0_64 = _do_efsctsf(T0_64); +} + +void do_efsctuf (void) +{ + T0_64 = _do_efsctuf(T0_64); +} + +void do_efsctsfz (void) +{ + T0_64 = _do_efsctsfz(T0_64); +} + +void do_efsctufz (void) +{ + T0_64 = _do_efsctufz(T0_64); +} + +/* Double precision floating point helpers */ +static inline int _do_efdcmplt (uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return _do_efdtstlt(op1, op2); +} + +static inline int _do_efdcmpgt (uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return _do_efdtstgt(op1, op2); +} + +static inline int _do_efdcmpeq (uint64_t op1, uint64_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) 
*/ + return _do_efdtsteq(op1, op2); +} + +void do_efdcmplt (void) +{ + T0 = _do_efdcmplt(T0_64, T1_64); +} + +void do_efdcmpgt (void) +{ + T0 = _do_efdcmpgt(T0_64, T1_64); +} + +void do_efdcmpeq (void) +{ + T0 = _do_efdcmpeq(T0_64, T1_64); +} + +/* Double precision floating-point conversion to/from integer */ +static inline uint64_t _do_efdcfsi (int64_t val) +{ + union { + uint64_t u; + float64 f; + } u; + + u.f = int64_to_float64(val, &env->spe_status); + + return u.u; +} + +static inline uint64_t _do_efdcfui (uint64_t val) +{ + union { + uint64_t u; + float64 f; + } u; + + u.f = uint64_to_float64(val, &env->spe_status); + + return u.u; +} + +static inline int64_t _do_efdctsi (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float64_to_int64(u.f, &env->spe_status); +} + +static inline uint64_t _do_efdctui (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float64_to_uint64(u.f, &env->spe_status); +} + +static inline int64_t _do_efdctsiz (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float64_to_int64_round_to_zero(u.f, &env->spe_status); +} + +static inline uint64_t _do_efdctuiz (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + + return float64_to_uint64_round_to_zero(u.f, &env->spe_status); +} + +void do_efdcfsi (void) +{ + T0_64 = _do_efdcfsi(T0_64); +} + +void do_efdcfui (void) +{ + T0_64 = _do_efdcfui(T0_64); +} + +void do_efdctsi (void) +{ + T0_64 = _do_efdctsi(T0_64); +} + +void do_efdctui (void) +{ + T0_64 = _do_efdctui(T0_64); +} + +void do_efdctsiz (void) +{ + T0_64 = _do_efdctsiz(T0_64); +} + +void do_efdctuiz (void) +{ + T0_64 = _do_efdctuiz(T0_64); +} + +/* Double precision floating-point conversion to/from fractional */ +static inline uint64_t _do_efdcfsf (int64_t val) +{ + union { + uint64_t u; + float64 f; + } u; + float64 tmp; + + u.f = int32_to_float64(val, &env->spe_status); + tmp = int64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_div(u.f, tmp, &env->spe_status); + + return u.u; +} + +static inline uint64_t _do_efdcfuf (uint64_t val) +{ + union { + uint64_t u; + float64 f; + } u; + float64 tmp; + + u.f = uint32_to_float64(val, &env->spe_status); + tmp = int64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_div(u.f, tmp, &env->spe_status); + + return u.u; +} + +static inline int64_t _do_efdctsf (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + float64 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_mul(u.f, tmp, &env->spe_status); + + return float64_to_int32(u.f, &env->spe_status); +} + +static inline uint64_t _do_efdctuf (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + float64 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_mul(u.f, tmp, &env->spe_status); + + return float64_to_uint32(u.f, &env->spe_status); +} + +static inline int64_t 
_do_efdctsfz (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + float64 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_mul(u.f, tmp, &env->spe_status); + + return float64_to_int32_round_to_zero(u.f, &env->spe_status); +} + +static inline uint64_t _do_efdctufz (uint64_t val) +{ + union { + int64_t u; + float64 f; + } u; + float64 tmp; + + u.u = val; + /* NaN are not treated the same way IEEE 754 does */ + if (unlikely(isnan(u.f))) + return 0; + tmp = uint64_to_float64(1ULL << 32, &env->spe_status); + u.f = float64_mul(u.f, tmp, &env->spe_status); + + return float64_to_uint32_round_to_zero(u.f, &env->spe_status); +} + +void do_efdcfsf (void) +{ + T0_64 = _do_efdcfsf(T0_64); +} + +void do_efdcfuf (void) +{ + T0_64 = _do_efdcfuf(T0_64); +} + +void do_efdctsf (void) +{ + T0_64 = _do_efdctsf(T0_64); +} + +void do_efdctuf (void) +{ + T0_64 = _do_efdctuf(T0_64); +} + +void do_efdctsfz (void) +{ + T0_64 = _do_efdctsfz(T0_64); +} + +void do_efdctufz (void) +{ + T0_64 = _do_efdctufz(T0_64); +} + +/* Floating point conversion between single and double precision */ +static inline uint32_t _do_efscfd (uint64_t val) +{ + union { + uint64_t u; + float64 f; + } u1; + union { + uint32_t u; + float32 f; + } u2; + + u1.u = val; + u2.f = float64_to_float32(u1.f, &env->spe_status); + + return u2.u; +} + +static inline uint64_t _do_efdcfs (uint32_t val) +{ + union { + uint64_t u; + float64 f; + } u2; + union { + uint32_t u; + float32 f; + } u1; + + u1.u = val; + u2.f = float32_to_float64(u1.f, &env->spe_status); + + return u2.u; +} + +void do_efscfd (void) +{ + T0_64 = _do_efscfd(T0_64); +} + +void do_efdcfs (void) +{ + T0_64 = _do_efdcfs(T0_64); +} + +/* Single precision fixed-point vector arithmetic */ +/* evfsabs */ +DO_SPE_OP1(fsabs); +/* evfsnabs */ +DO_SPE_OP1(fsnabs); +/* evfsneg */ +DO_SPE_OP1(fsneg); +/* evfsadd */ +DO_SPE_OP2(fsadd); +/* evfssub */ +DO_SPE_OP2(fssub); +/* evfsmul */ +DO_SPE_OP2(fsmul); +/* evfsdiv */ +DO_SPE_OP2(fsdiv); + +/* Single-precision floating-point comparisons */ +static inline int _do_efscmplt (uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return _do_efststlt(op1, op2); +} + +static inline int _do_efscmpgt (uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) */ + return _do_efststgt(op1, op2); +} + +static inline int _do_efscmpeq (uint32_t op1, uint32_t op2) +{ + /* XXX: TODO: test special values (NaN, infinites, ...) 
*/ + return _do_efststeq(op1, op2); +} + +void do_efscmplt (void) +{ + T0 = _do_efscmplt(T0_64, T1_64); +} + +void do_efscmpgt (void) +{ + T0 = _do_efscmpgt(T0_64, T1_64); +} + +void do_efscmpeq (void) +{ + T0 = _do_efscmpeq(T0_64, T1_64); +} + +/* Single-precision floating-point vector comparisons */ +/* evfscmplt */ +DO_SPE_CMP(fscmplt); +/* evfscmpgt */ +DO_SPE_CMP(fscmpgt); +/* evfscmpeq */ +DO_SPE_CMP(fscmpeq); +/* evfststlt */ +DO_SPE_CMP(fststlt); +/* evfststgt */ +DO_SPE_CMP(fststgt); +/* evfststeq */ +DO_SPE_CMP(fststeq); + +/* Single-precision floating-point vector conversions */ +/* evfscfsi */ +DO_SPE_OP1(fscfsi); +/* evfscfui */ +DO_SPE_OP1(fscfui); +/* evfscfuf */ +DO_SPE_OP1(fscfuf); +/* evfscfsf */ +DO_SPE_OP1(fscfsf); +/* evfsctsi */ +DO_SPE_OP1(fsctsi); +/* evfsctui */ +DO_SPE_OP1(fsctui); +/* evfsctsiz */ +DO_SPE_OP1(fsctsiz); +/* evfsctuiz */ +DO_SPE_OP1(fsctuiz); +/* evfsctsf */ +DO_SPE_OP1(fsctsf); +/* evfsctuf */ +DO_SPE_OP1(fsctuf); +#endif /* defined(TARGET_PPCSPE) */ + /*****************************************************************************/ /* Softmmu support */ #if !defined (CONFIG_USER_ONLY) diff --git a/target-ppc/op_helper.h b/target-ppc/op_helper.h index 6eaceb3..8a735c1 100644 --- a/target-ppc/op_helper.h +++ b/target-ppc/op_helper.h @@ -100,6 +100,7 @@ void do_fctiwz (void); void do_fcmpu (void); void do_fcmpo (void); +/* Misc */ void do_tw (int flags); #if defined(TARGET_PPC64) void do_td (int flags); @@ -157,11 +158,291 @@ void do_4xx_tlbwe_lo (void); void do_4xx_tlbwe_hi (void); #endif +/* PowerPC 440 specific helpers */ void do_440_dlmzb (void); +/* PowerPC 403 specific helpers */ #if !defined(CONFIG_USER_ONLY) void do_load_403_pb (int num); void do_store_403_pb (int num); #endif +#if defined(TARGET_PPCSPE) +/* SPE extension helpers */ +void do_brinc (void); +/* Fixed-point vector helpers */ +void do_evabs (void); +void do_evaddw (void); +void do_evcntlsw (void); +void do_evcntlzw (void); +void do_evneg (void); +void do_evrlw (void); +void do_evsel (void); +void do_evrndw (void); +void do_evslw (void); +void do_evsrws (void); +void do_evsrwu (void); +void do_evsubfw (void); +void do_evcmpeq (void); +void do_evcmpgts (void); +void do_evcmpgtu (void); +void do_evcmplts (void); +void do_evcmpltu (void); + +/* Single precision floating-point helpers */ +void do_efscmplt (void); +void do_efscmpgt (void); +void do_efscmpeq (void); +void do_efscfsf (void); +void do_efscfuf (void); +void do_efsctsf (void); +void do_efsctuf (void); + +void do_efscfsi (void); +void do_efscfui (void); +void do_efsctsi (void); +void do_efsctui (void); +void do_efsctsiz (void); +void do_efsctuiz (void); + +/* Double precision floating-point helpers */ +void do_efdcmplt (void); +void do_efdcmpgt (void); +void do_efdcmpeq (void); +void do_efdcfsf (void); +void do_efdcfuf (void); +void do_efdctsf (void); +void do_efdctuf (void); + +void do_efdcfsi (void); +void do_efdcfui (void); +void do_efdctsi (void); +void do_efdctui (void); +void do_efdctsiz (void); +void do_efdctuiz (void); + +void do_efdcfs (void); +void do_efscfd (void); + +/* Floating-point vector helpers */ +void do_evfsabs (void); +void do_evfsnabs (void); +void do_evfsneg (void); +void do_evfsadd (void); +void do_evfssub (void); +void do_evfsmul (void); +void do_evfsdiv (void); +void do_evfscmplt (void); +void do_evfscmpgt (void); +void do_evfscmpeq (void); +void do_evfststlt (void); +void do_evfststgt (void); +void do_evfststeq (void); +void do_evfscfsi (void); +void do_evfscfui (void); +void do_evfscfsf (void); 
+void do_evfscfuf (void); +void do_evfsctsf (void); +void do_evfsctuf (void); +void do_evfsctsi (void); +void do_evfsctui (void); +void do_evfsctsiz (void); +void do_evfsctuiz (void); +#endif /* defined(TARGET_PPCSPE) */ + +/* Inlined helpers: used in micro-operation as well as helpers */ +/* Generic fixed-point helpers */ +static inline int _do_cntlzw (uint32_t val) +{ + int cnt = 0; + if (!(val & 0xFFFF0000UL)) { + cnt += 16; + val <<= 16; + } + if (!(val & 0xFF000000UL)) { + cnt += 8; + val <<= 8; + } + if (!(val & 0xF0000000UL)) { + cnt += 4; + val <<= 4; + } + if (!(val & 0xC0000000UL)) { + cnt += 2; + val <<= 2; + } + if (!(val & 0x80000000UL)) { + cnt++; + val <<= 1; + } + if (!(val & 0x80000000UL)) { + cnt++; + } + return cnt; +} + +static inline int _do_cntlzd (uint64_t val) +{ + int cnt = 0; +#if HOST_LONG_BITS == 64 + if (!(val & 0xFFFFFFFF00000000ULL)) { + cnt += 32; + val <<= 32; + } + if (!(val & 0xFFFF000000000000ULL)) { + cnt += 16; + val <<= 16; + } + if (!(val & 0xFF00000000000000ULL)) { + cnt += 8; + val <<= 8; + } + if (!(val & 0xF000000000000000ULL)) { + cnt += 4; + val <<= 4; + } + if (!(val & 0xC000000000000000ULL)) { + cnt += 2; + val <<= 2; + } + if (!(val & 0x8000000000000000ULL)) { + cnt++; + val <<= 1; + } + if (!(val & 0x8000000000000000ULL)) { + cnt++; + } +#else + uint32_t tmp; + /* Make it easier on 32 bits host machines */ + if (!(val >> 32)) + cnt = cntlzw(val) + 32; + else + cnt = cntlzw(val >> 32); +#endif + return cnt; +} + +#if defined(TARGET_PPCSPE) +/* SPE extension */ +/* Single precision floating-point helpers */ +static inline uint32_t _do_efsabs (uint32_t val) +{ + return val & ~0x80000000; +} +static inline uint32_t _do_efsnabs (uint32_t val) +{ + return val | 0x80000000; +} +static inline uint32_t _do_efsneg (uint32_t val) +{ + return val ^ 0x80000000; +} +static inline uint32_t _do_efsadd (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + u1.f = float32_add(u1.f, u2.f, &env->spe_status); + return u1.u; +} +static inline uint32_t _do_efssub (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + u1.f = float32_sub(u1.f, u2.f, &env->spe_status); + return u1.u; +} +static inline uint32_t _do_efsmul (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + u1.f = float32_mul(u1.f, u2.f, &env->spe_status); + return u1.u; +} +static inline uint32_t _do_efsdiv (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + u1.f = float32_div(u1.f, u2.f, &env->spe_status); + return u1.u; +} + +static inline int _do_efststlt (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float32_lt(u1.f, u2.f, &env->spe_status) ? 1 : 0; +} +static inline int _do_efststgt (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 1; +} +static inline int _do_efststeq (uint32_t op1, uint32_t op2) +{ + union { + uint32_t u; + float32 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float32_eq(u1.f, u2.f, &env->spe_status) ? 1 : 0; +} +/* Double precision floating-point helpers */ +static inline int _do_efdtstlt (uint64_t op1, uint64_t op2) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float64_lt(u1.f, u2.f, &env->spe_status) ? 
1 : 0; +} +static inline int _do_efdtstgt (uint64_t op1, uint64_t op2) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float64_le(u1.f, u2.f, &env->spe_status) ? 0 : 1; +} +static inline int _do_efdtsteq (uint64_t op1, uint64_t op2) +{ + union { + uint64_t u; + float64 f; + } u1, u2; + u1.u = op1; + u2.u = op2; + return float64_eq(u1.f, u2.f, &env->spe_status) ? 1 : 0; +} +#endif /* defined(TARGET_PPCSPE) */ #endif diff --git a/target-ppc/op_mem.h b/target-ppc/op_mem.h index f5a8c4b..f080abc 100644 --- a/target-ppc/op_mem.h +++ b/target-ppc/op_mem.h @@ -37,12 +37,7 @@ static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA) ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24); } -#if defined(TARGET_PPC64) -static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA) -{ - return (int32_t)glue(ldl, MEMSUFFIX)(EA); -} - +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE) static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA) { uint64_t tmp = glue(ldq, MEMSUFFIX)(EA); @@ -55,6 +50,13 @@ static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA) ((tmp & 0x000000000000FF00ULL) << 40) | ((tmp & 0x00000000000000FFULL) << 54); } +#endif + +#if defined(TARGET_PPC64) +static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA) +{ + return (int32_t)glue(ldl, MEMSUFFIX)(EA); +} static inline int64_t glue(ld32rs, MEMSUFFIX) (target_ulong EA) { @@ -77,7 +79,7 @@ static inline void glue(st32r, MEMSUFFIX) (target_ulong EA, uint32_t data) glue(stl, MEMSUFFIX)(EA, tmp); } -#if defined(TARGET_PPC64) +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE) static inline void glue(st64r, MEMSUFFIX) (target_ulong EA, uint64_t data) { uint64_t tmp = ((data & 0xFF00000000000000ULL) >> 56) | @@ -839,4 +841,262 @@ void OPPROTO glue(op_POWER2_stfq_le, MEMSUFFIX) (void) RETURN(); } +#if defined(TARGET_PPCSPE) +/* SPE extension */ +#define _PPC_SPE_LD_OP(name, op) \ +void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void) \ +{ \ + T1_64 = glue(op, MEMSUFFIX)((uint32_t)T0); \ + RETURN(); \ +} + +#if defined(TARGET_PPC64) +#define _PPC_SPE_LD_OP_64(name, op) \ +void OPPROTO glue(glue(glue(op_spe_l, name), _64), MEMSUFFIX) (void) \ +{ \ + T1_64 = glue(op, MEMSUFFIX)((uint64_t)T0); \ + RETURN(); \ +} +#define PPC_SPE_LD_OP(name, op) \ +_PPC_SPE_LD_OP(name, op); \ +_PPC_SPE_LD_OP_64(name, op) +#else +#define PPC_SPE_LD_OP(name, op) \ +_PPC_SPE_LD_OP(name, op) +#endif + + +#define _PPC_SPE_ST_OP(name, op) \ +void OPPROTO glue(glue(op_spe_st, name), MEMSUFFIX) (void) \ +{ \ + glue(op, MEMSUFFIX)((uint32_t)T0, T1_64); \ + RETURN(); \ +} + +#if defined(TARGET_PPC64) +#define _PPC_SPE_ST_OP_64(name, op) \ +void OPPROTO glue(glue(glue(op_spe_st, name), _64), MEMSUFFIX) (void) \ +{ \ + glue(op, MEMSUFFIX)((uint64_t)T0, T1_64); \ + RETURN(); \ +} +#define PPC_SPE_ST_OP(name, op) \ +_PPC_SPE_ST_OP(name, op); \ +_PPC_SPE_ST_OP_64(name, op) +#else +#define PPC_SPE_ST_OP(name, op) \ +_PPC_SPE_ST_OP(name, op) +#endif + +#if !defined(TARGET_PPC64) +PPC_SPE_LD_OP(dd, ldq); +PPC_SPE_ST_OP(dd, stq); +PPC_SPE_LD_OP(dd_le, ld64r); +PPC_SPE_ST_OP(dd_le, st64r); +#endif +static inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(ldl, MEMSUFFIX)(EA) << 32; + ret |= (uint64_t)glue(ldl, MEMSUFFIX)(EA + 4); + return ret; +} +PPC_SPE_LD_OP(dw, spe_ldw); +static inline void glue(spe_stdw, MEMSUFFIX) (target_ulong EA, uint64_t data) +{ + glue(stl, MEMSUFFIX)(EA, data >> 32); + glue(stl, MEMSUFFIX)(EA + 4, data); +} +PPC_SPE_ST_OP(dw, 
spe_stdw); +static inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(ld32r, MEMSUFFIX)(EA) << 32; + ret |= (uint64_t)glue(ld32r, MEMSUFFIX)(EA + 4); + return ret; +} +PPC_SPE_LD_OP(dw_le, spe_ldw_le); +static inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA, + uint64_t data) +{ + glue(st32r, MEMSUFFIX)(EA, data >> 32); + glue(st32r, MEMSUFFIX)(EA + 4, data); +} +PPC_SPE_ST_OP(dw_le, spe_stdw_le); +static inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 48; + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2) << 32; + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 4) << 16; + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 6); + return ret; +} +PPC_SPE_LD_OP(dh, spe_ldh); +static inline void glue(spe_stdh, MEMSUFFIX) (target_ulong EA, uint64_t data) +{ + glue(stw, MEMSUFFIX)(EA, data >> 48); + glue(stw, MEMSUFFIX)(EA + 2, data >> 32); + glue(stw, MEMSUFFIX)(EA + 4, data >> 16); + glue(stw, MEMSUFFIX)(EA + 6, data); +} +PPC_SPE_ST_OP(dh, spe_stdh); +static inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48; + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 32; + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 4) << 16; + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 6); + return ret; +} +PPC_SPE_LD_OP(dh_le, spe_ldh_le); +static inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA, + uint64_t data) +{ + glue(st16r, MEMSUFFIX)(EA, data >> 48); + glue(st16r, MEMSUFFIX)(EA + 2, data >> 32); + glue(st16r, MEMSUFFIX)(EA + 4, data >> 16); + glue(st16r, MEMSUFFIX)(EA + 6, data); +} +PPC_SPE_ST_OP(dh_le, spe_stdh_le); +static inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 48; + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2) << 16; + return ret; +} +PPC_SPE_LD_OP(whe, spe_lwhe); +static inline void glue(spe_stwhe, MEMSUFFIX) (target_ulong EA, uint64_t data) +{ + glue(stw, MEMSUFFIX)(EA, data >> 48); + glue(stw, MEMSUFFIX)(EA + 2, data >> 16); +} +PPC_SPE_ST_OP(whe, spe_stwhe); +static inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48; + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 16; + return ret; +} +PPC_SPE_LD_OP(whe_le, spe_lwhe_le); +static inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA, + uint64_t data) +{ + glue(st16r, MEMSUFFIX)(EA, data >> 48); + glue(st16r, MEMSUFFIX)(EA + 2, data >> 16); +} +PPC_SPE_ST_OP(whe_le, spe_stwhe_le); +static inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 32; + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2); + return ret; +} +PPC_SPE_LD_OP(whou, spe_lwhou); +static inline uint64_t glue(spe_lwhos, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = ((uint64_t)((int32_t)glue(ldsw, MEMSUFFIX)(EA))) << 32; + ret |= (uint64_t)((int32_t)glue(ldsw, MEMSUFFIX)(EA + 2)); + return ret; +} +PPC_SPE_LD_OP(whos, spe_lwhos); +static inline void glue(spe_stwho, MEMSUFFIX) (target_ulong EA, uint64_t data) +{ + glue(stw, MEMSUFFIX)(EA, data >> 32); + glue(stw, MEMSUFFIX)(EA + 2, data); +} +PPC_SPE_ST_OP(who, spe_stwho); +static inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 32; + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2); + 
return ret; +} +PPC_SPE_LD_OP(whou_le, spe_lwhou_le); +static inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + ret = ((uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA))) << 32; + ret |= (uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA + 2)); + return ret; +} +PPC_SPE_LD_OP(whos_le, spe_lwhos_le); +static inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA, + uint64_t data) +{ + glue(st16r, MEMSUFFIX)(EA, data >> 32); + glue(st16r, MEMSUFFIX)(EA + 2, data); +} +PPC_SPE_ST_OP(who_le, spe_stwho_le); +#if !defined(TARGET_PPC64) +static inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA, uint64_t data) +{ + glue(stl, MEMSUFFIX)(EA, data); +} +PPC_SPE_ST_OP(wwo, spe_stwwo); +static inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA, + uint64_t data) +{ + glue(st32r, MEMSUFFIX)(EA, data); +} +PPC_SPE_ST_OP(wwo_le, spe_stwwo_le); +#endif +static inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA) +{ + uint16_t tmp; + tmp = glue(lduw, MEMSUFFIX)(EA); + return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16); +} +PPC_SPE_LD_OP(h, spe_lh); +static inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA) +{ + uint16_t tmp; + tmp = glue(ld16r, MEMSUFFIX)(EA); + return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16); +} +PPC_SPE_LD_OP(h_le, spe_lh_le); +static inline uint64_t glue(spe_lwwsplat, MEMSUFFIX) (target_ulong EA) +{ + uint32_t tmp; + tmp = glue(ldl, MEMSUFFIX)(EA); + return ((uint64_t)tmp << 32) | (uint64_t)tmp; +} +PPC_SPE_LD_OP(wwsplat, spe_lwwsplat); +static inline uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA) +{ + uint32_t tmp; + tmp = glue(ld32r, MEMSUFFIX)(EA); + return ((uint64_t)tmp << 32) | (uint64_t)tmp; +} +PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le); +static inline uint64_t glue(spe_lwhsplat, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + uint16_t tmp; + tmp = glue(lduw, MEMSUFFIX)(EA); + ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32); + tmp = glue(lduw, MEMSUFFIX)(EA + 2); + ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp; + return ret; +} +PPC_SPE_LD_OP(whsplat, spe_lwhsplat); +static inline uint64_t glue(spe_lwhsplat_le, MEMSUFFIX) (target_ulong EA) +{ + uint64_t ret; + uint16_t tmp; + tmp = glue(ld16r, MEMSUFFIX)(EA); + ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32); + tmp = glue(ld16r, MEMSUFFIX)(EA + 2); + ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp; + return ret; +} +PPC_SPE_LD_OP(whsplat_le, spe_lwhsplat_le); +#endif /* defined(TARGET_PPCSPE) */ + #undef MEMSUFFIX diff --git a/target-ppc/op_template.h b/target-ppc/op_template.h index 511d065..bcef1a5 100644 --- a/target-ppc/op_template.h +++ b/target-ppc/op_template.h @@ -57,6 +57,48 @@ void OPPROTO glue(op_store_T2_gpr_gpr, REG) (void) } #endif +#if defined(TARGET_PPCSPE) +void OPPROTO glue(op_load_gpr64_T0_gpr, REG) (void) +{ + T0_64 = regs->gpr[REG]; + RETURN(); +} + +void OPPROTO glue(op_load_gpr64_T1_gpr, REG) (void) +{ + T1_64 = regs->gpr[REG]; + RETURN(); +} + +#if 0 // unused +void OPPROTO glue(op_load_gpr64_T2_gpr, REG) (void) +{ + T2_64 = regs->gpr[REG]; + RETURN(); +} +#endif + +void OPPROTO glue(op_store_T0_gpr64_gpr, REG) (void) +{ + regs->gpr[REG] = T0_64; + RETURN(); +} + +void OPPROTO glue(op_store_T1_gpr64_gpr, REG) (void) +{ + regs->gpr[REG] = T1_64; + RETURN(); +} + +#if 0 // unused +void OPPROTO glue(op_store_T2_gpr64_gpr, REG) (void) +{ + regs->gpr[REG] = T2_64; + RETURN(); +} +#endif +#endif /* defined(TARGET_PPCSPE) */ + #if REG <= 7 /* Condition register moves */ void OPPROTO glue(op_load_crf_T0_crf, REG) (void) diff 
--git a/target-ppc/translate.c b/target-ppc/translate.c index d9be35d..90a9fc9 100644 --- a/target-ppc/translate.c +++ b/target-ppc/translate.c @@ -160,6 +160,9 @@ typedef struct DisasContext { int sf_mode; #endif int fpu_enabled; +#if defined(TARGET_PPCSPE) + int spe_enabled; +#endif ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; } DisasContext; @@ -168,7 +171,7 @@ struct opc_handler_t { /* invalid bits */ uint32_t inval; /* instruction type */ - uint32_t type; + uint64_t type; /* handler */ void (*handler)(DisasContext *ctx); #if defined(DO_PPC_STATISTICS) @@ -4468,6 +4471,814 @@ GEN_HANDLER(icbt_440, 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE) */ } +#if defined(TARGET_PPCSPE) +/*** SPE extension ***/ + +/* Register moves */ +GEN32(gen_op_load_gpr64_T0, gen_op_load_gpr64_T0_gpr); +GEN32(gen_op_load_gpr64_T1, gen_op_load_gpr64_T1_gpr); +#if 0 // unused +GEN32(gen_op_load_gpr64_T2, gen_op_load_gpr64_T2_gpr); +#endif + +GEN32(gen_op_store_T0_gpr64, gen_op_store_T0_gpr64_gpr); +GEN32(gen_op_store_T1_gpr64, gen_op_store_T1_gpr64_gpr); +#if 0 // unused +GEN32(gen_op_store_T2_gpr64, gen_op_store_T2_gpr64_gpr); +#endif + +#define GEN_SPE(name0, name1, opc2, opc3, inval, type) \ +GEN_HANDLER(name0##_##name1, 0x04, opc2, opc3, inval, type) \ +{ \ + if (Rc(ctx->opcode)) \ + gen_##name1(ctx); \ + else \ + gen_##name0(ctx); \ +} + +/* Handler for undefined SPE opcodes */ +static inline void gen_speundef (DisasContext *ctx) +{ + RET_INVAL(ctx); +} + +/* SPE load and stores */ +static inline void gen_addr_spe_imm_index (DisasContext *ctx, int sh) +{ + target_long simm = rB(ctx->opcode); + + if (rA(ctx->opcode) == 0) { + gen_set_T0(simm << sh); + } else { + gen_op_load_gpr_T0(rA(ctx->opcode)); + if (likely(simm != 0)) + gen_op_addi(simm << sh); + } +} + +#define op_spe_ldst(name) (*gen_op_##name[ctx->mem_idx])() +#if defined(CONFIG_USER_ONLY) +#if defined(TARGET_PPC64) +#define OP_SPE_LD_TABLE(name) \ +static GenOpFunc *gen_op_spe_l##name[] = { \ + &gen_op_spe_l##name##_raw, \ + &gen_op_spe_l##name##_le_raw, \ + &gen_op_spe_l##name##_64_raw, \ + &gen_op_spe_l##name##_le_64_raw, \ +}; +#define OP_SPE_ST_TABLE(name) \ +static GenOpFunc *gen_op_spe_st##name[] = { \ + &gen_op_spe_st##name##_raw, \ + &gen_op_spe_st##name##_le_raw, \ + &gen_op_spe_st##name##_64_raw, \ + &gen_op_spe_st##name##_le_64_raw, \ +}; +#else /* defined(TARGET_PPC64) */ +#define OP_SPE_LD_TABLE(name) \ +static GenOpFunc *gen_op_spe_l##name[] = { \ + &gen_op_spe_l##name##_raw, \ + &gen_op_spe_l##name##_le_raw, \ +}; +#define OP_SPE_ST_TABLE(name) \ +static GenOpFunc *gen_op_spe_st##name[] = { \ + &gen_op_spe_st##name##_raw, \ + &gen_op_spe_st##name##_le_raw, \ +}; +#endif /* defined(TARGET_PPC64) */ +#else /* defined(CONFIG_USER_ONLY) */ +#if defined(TARGET_PPC64) +#define OP_SPE_LD_TABLE(name) \ +static GenOpFunc *gen_op_spe_l##name[] = { \ + &gen_op_spe_l##name##_user, \ + &gen_op_spe_l##name##_le_user, \ + &gen_op_spe_l##name##_kernel, \ + &gen_op_spe_l##name##_le_kernel, \ + &gen_op_spe_l##name##_64_user, \ + &gen_op_spe_l##name##_le_64_user, \ + &gen_op_spe_l##name##_64_kernel, \ + &gen_op_spe_l##name##_le_64_kernel, \ +}; +#define OP_SPE_ST_TABLE(name) \ +static GenOpFunc *gen_op_spe_st##name[] = { \ + &gen_op_spe_st##name##_user, \ + &gen_op_spe_st##name##_le_user, \ + &gen_op_spe_st##name##_kernel, \ + &gen_op_spe_st##name##_le_kernel, \ + &gen_op_spe_st##name##_64_user, \ + &gen_op_spe_st##name##_le_64_user, \ + &gen_op_spe_st##name##_64_kernel, \ + &gen_op_spe_st##name##_le_64_kernel, \ 
+}; +#else /* defined(TARGET_PPC64) */ +#define OP_SPE_LD_TABLE(name) \ +static GenOpFunc *gen_op_spe_l##name[] = { \ + &gen_op_spe_l##name##_user, \ + &gen_op_spe_l##name##_le_user, \ + &gen_op_spe_l##name##_kernel, \ + &gen_op_spe_l##name##_le_kernel, \ +}; +#define OP_SPE_ST_TABLE(name) \ +static GenOpFunc *gen_op_spe_st##name[] = { \ + &gen_op_spe_st##name##_user, \ + &gen_op_spe_st##name##_le_user, \ + &gen_op_spe_st##name##_kernel, \ + &gen_op_spe_st##name##_le_kernel, \ +}; +#endif /* defined(TARGET_PPC64) */ +#endif /* defined(CONFIG_USER_ONLY) */ + +#define GEN_SPE_LD(name, sh) \ +static inline void gen_evl##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_addr_spe_imm_index(ctx, sh); \ + op_spe_ldst(spe_l##name); \ + gen_op_store_T1_gpr64(rD(ctx->opcode)); \ +} + +#define GEN_SPE_LDX(name) \ +static inline void gen_evl##name##x (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_addr_reg_index(ctx); \ + op_spe_ldst(spe_l##name); \ + gen_op_store_T1_gpr64(rD(ctx->opcode)); \ +} + +#define GEN_SPEOP_LD(name, sh) \ +OP_SPE_LD_TABLE(name); \ +GEN_SPE_LD(name, sh); \ +GEN_SPE_LDX(name) + +#define GEN_SPE_ST(name, sh) \ +static inline void gen_evst##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_addr_spe_imm_index(ctx, sh); \ + gen_op_load_gpr64_T1(rS(ctx->opcode)); \ + op_spe_ldst(spe_st##name); \ +} + +#define GEN_SPE_STX(name) \ +static inline void gen_evst##name##x (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_addr_reg_index(ctx); \ + gen_op_load_gpr64_T1(rS(ctx->opcode)); \ + op_spe_ldst(spe_st##name); \ +} + +#define GEN_SPEOP_ST(name, sh) \ +OP_SPE_ST_TABLE(name); \ +GEN_SPE_ST(name, sh); \ +GEN_SPE_STX(name) + +#define GEN_SPEOP_LDST(name, sh) \ +GEN_SPEOP_LD(name, sh); \ +GEN_SPEOP_ST(name, sh) + +/* SPE arithmetic and logic */ +#define GEN_SPEOP_ARITH2(name) \ +static inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_op_load_gpr64_T0(rA(ctx->opcode)); \ + gen_op_load_gpr64_T1(rB(ctx->opcode)); \ + gen_op_##name(); \ + gen_op_store_T0_gpr64(rD(ctx->opcode)); \ +} + +#define GEN_SPEOP_ARITH1(name) \ +static inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_op_load_gpr64_T0(rA(ctx->opcode)); \ + gen_op_##name(); \ + gen_op_store_T0_gpr64(rD(ctx->opcode)); \ +} + +#define GEN_SPEOP_COMP(name) \ +static inline void gen_##name (DisasContext *ctx) \ +{ \ + if (unlikely(!ctx->spe_enabled)) { \ + RET_EXCP(ctx, EXCP_NO_SPE, 0); \ + return; \ + } \ + gen_op_load_gpr64_T0(rA(ctx->opcode)); \ + gen_op_load_gpr64_T1(rB(ctx->opcode)); \ + gen_op_##name(); \ + gen_op_store_T0_crf(crfD(ctx->opcode)); \ +} + +/* Logical */ +GEN_SPEOP_ARITH2(evand); +GEN_SPEOP_ARITH2(evandc); +GEN_SPEOP_ARITH2(evxor); +GEN_SPEOP_ARITH2(evor); +GEN_SPEOP_ARITH2(evnor); +GEN_SPEOP_ARITH2(eveqv); +GEN_SPEOP_ARITH2(evorc); +GEN_SPEOP_ARITH2(evnand); +GEN_SPEOP_ARITH2(evsrwu); +GEN_SPEOP_ARITH2(evsrws); +GEN_SPEOP_ARITH2(evslw); +GEN_SPEOP_ARITH2(evrlw); +GEN_SPEOP_ARITH2(evmergehi); +GEN_SPEOP_ARITH2(evmergelo); +GEN_SPEOP_ARITH2(evmergehilo); +GEN_SPEOP_ARITH2(evmergelohi); + +/* Arithmetic */ 
+
+#define GEN_SPE_LD(name, sh) \
+static inline void gen_evl##name (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_addr_spe_imm_index(ctx, sh); \
+    op_spe_ldst(spe_l##name); \
+    gen_op_store_T1_gpr64(rD(ctx->opcode)); \
+}
+
+#define GEN_SPE_LDX(name) \
+static inline void gen_evl##name##x (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_addr_reg_index(ctx); \
+    op_spe_ldst(spe_l##name); \
+    gen_op_store_T1_gpr64(rD(ctx->opcode)); \
+}
+
+#define GEN_SPEOP_LD(name, sh) \
+OP_SPE_LD_TABLE(name); \
+GEN_SPE_LD(name, sh); \
+GEN_SPE_LDX(name)
+
+#define GEN_SPE_ST(name, sh) \
+static inline void gen_evst##name (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_addr_spe_imm_index(ctx, sh); \
+    gen_op_load_gpr64_T1(rS(ctx->opcode)); \
+    op_spe_ldst(spe_st##name); \
+}
+
+#define GEN_SPE_STX(name) \
+static inline void gen_evst##name##x (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_addr_reg_index(ctx); \
+    gen_op_load_gpr64_T1(rS(ctx->opcode)); \
+    op_spe_ldst(spe_st##name); \
+}
+
+#define GEN_SPEOP_ST(name, sh) \
+OP_SPE_ST_TABLE(name); \
+GEN_SPE_ST(name, sh); \
+GEN_SPE_STX(name)
+
+#define GEN_SPEOP_LDST(name, sh) \
+GEN_SPEOP_LD(name, sh); \
+GEN_SPEOP_ST(name, sh)
+
+/* SPE arithmetic and logic */
+#define GEN_SPEOP_ARITH2(name) \
+static inline void gen_##name (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_op_load_gpr64_T0(rA(ctx->opcode)); \
+    gen_op_load_gpr64_T1(rB(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_gpr64(rD(ctx->opcode)); \
+}
+
+#define GEN_SPEOP_ARITH1(name) \
+static inline void gen_##name (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_op_load_gpr64_T0(rA(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_gpr64(rD(ctx->opcode)); \
+}
+
+#define GEN_SPEOP_COMP(name) \
+static inline void gen_##name (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_op_load_gpr64_T0(rA(ctx->opcode)); \
+    gen_op_load_gpr64_T1(rB(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_crf(crfD(ctx->opcode)); \
+}
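The handlers generated by GEN_SPEOP_ARITH2 move whole 64-bit GPRs through T0/T1, but SPE semantics treat each register as two independent 32-bit elements. As an illustration of what a gen_op_* body such as evaddw's has to implement (a sketch of the semantics, not code from the patch):

    #include <stdint.h>

    /* Element-wise add: each 32-bit half wraps on its own; no carry
     * crosses the middle of the 64-bit register. */
    static uint64_t sketch_evaddw(uint64_t a, uint64_t b)
    {
        uint32_t hi = (uint32_t)(a >> 32) + (uint32_t)(b >> 32);
        uint32_t lo = (uint32_t)a + (uint32_t)b;
        return ((uint64_t)hi << 32) | lo;
    }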
+
+/* Logical */
+GEN_SPEOP_ARITH2(evand);
+GEN_SPEOP_ARITH2(evandc);
+GEN_SPEOP_ARITH2(evxor);
+GEN_SPEOP_ARITH2(evor);
+GEN_SPEOP_ARITH2(evnor);
+GEN_SPEOP_ARITH2(eveqv);
+GEN_SPEOP_ARITH2(evorc);
+GEN_SPEOP_ARITH2(evnand);
+GEN_SPEOP_ARITH2(evsrwu);
+GEN_SPEOP_ARITH2(evsrws);
+GEN_SPEOP_ARITH2(evslw);
+GEN_SPEOP_ARITH2(evrlw);
+GEN_SPEOP_ARITH2(evmergehi);
+GEN_SPEOP_ARITH2(evmergelo);
+GEN_SPEOP_ARITH2(evmergehilo);
+GEN_SPEOP_ARITH2(evmergelohi);
+
+/* Arithmetic */
+GEN_SPEOP_ARITH2(evaddw);
+GEN_SPEOP_ARITH2(evsubfw);
+GEN_SPEOP_ARITH1(evabs);
+GEN_SPEOP_ARITH1(evneg);
+GEN_SPEOP_ARITH1(evextsb);
+GEN_SPEOP_ARITH1(evextsh);
+GEN_SPEOP_ARITH1(evrndw);
+GEN_SPEOP_ARITH1(evcntlzw);
+GEN_SPEOP_ARITH1(evcntlsw);
+static inline void gen_brinc (DisasContext *ctx)
+{
+    /* Note: brinc is usable even if SPE is disabled */
+    gen_op_load_gpr64_T0(rA(ctx->opcode));
+    gen_op_load_gpr64_T1(rB(ctx->opcode));
+    gen_op_brinc();
+    gen_op_store_T0_gpr64(rD(ctx->opcode));
+}
+
+#define GEN_SPEOP_ARITH_IMM2(name) \
+static inline void gen_##name##i (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_op_load_gpr64_T0(rB(ctx->opcode)); \
+    gen_op_splatwi_T1_64(rA(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_gpr64(rD(ctx->opcode)); \
+}
+
+#define GEN_SPEOP_LOGIC_IMM2(name) \
+static inline void gen_##name##i (DisasContext *ctx) \
+{ \
+    if (unlikely(!ctx->spe_enabled)) { \
+        RET_EXCP(ctx, EXCP_NO_SPE, 0); \
+        return; \
+    } \
+    gen_op_load_gpr64_T0(rA(ctx->opcode)); \
+    gen_op_splatwi_T1_64(rB(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_gpr64(rD(ctx->opcode)); \
+}
+
+GEN_SPEOP_ARITH_IMM2(evaddw);
+#define gen_evaddiw gen_evaddwi
+GEN_SPEOP_ARITH_IMM2(evsubfw);
+#define gen_evsubifw gen_evsubfwi
+GEN_SPEOP_LOGIC_IMM2(evslw);
+GEN_SPEOP_LOGIC_IMM2(evsrwu);
+#define gen_evsrwis gen_evsrwsi
+GEN_SPEOP_LOGIC_IMM2(evsrws);
+#define gen_evsrwiu gen_evsrwui
+GEN_SPEOP_LOGIC_IMM2(evrlw);
+
+static inline void gen_evsplati (DisasContext *ctx)
+{
+    int32_t imm = (int32_t)(rA(ctx->opcode) << 27) >> 27;
+
+    gen_op_splatwi_T0_64(imm);
+    gen_op_store_T0_gpr64(rD(ctx->opcode));
+}
+
+static inline void gen_evsplatfi (DisasContext *ctx)
+{
+    uint32_t imm = rA(ctx->opcode) << 27;
+
+    gen_op_splatwi_T0_64(imm);
+    gen_op_store_T0_gpr64(rD(ctx->opcode));
+}
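gen_evsplati recovers a signed value from the 5-bit rA field by shifting it to the top of a 32-bit word and arithmetic-shifting it back down, then splats it into both halves of the destination. A worked sketch of that sign-extension trick (assuming the usual arithmetic right shift on signed values):

    #include <assert.h>
    #include <stdint.h>

    /* Shift the 5-bit field so its sign bit lands at bit 31, then drag
     * it back down with an arithmetic shift. */
    static int32_t sext5(uint32_t field)
    {
        return (int32_t)(field << 27) >> 27;
    }

    int main(void)
    {
        assert(sext5(0x1F) == -1);   /* 0b11111 */
        assert(sext5(0x10) == -16);  /* 0b10000 */
        assert(sext5(0x0F) == 15);   /* 0b01111 */
        return 0;
    }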
+
+/* Comparison */
+GEN_SPEOP_COMP(evcmpgtu);
+GEN_SPEOP_COMP(evcmpgts);
+GEN_SPEOP_COMP(evcmpltu);
+GEN_SPEOP_COMP(evcmplts);
+GEN_SPEOP_COMP(evcmpeq);
+
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x00000000, PPC_SPE); //
+GEN_SPE(speundef, evand, 0x08, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, PPC_SPE); //
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, PPC_SPE);
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, PPC_SPE); ////
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE); ////
+
+static inline void gen_evsel (DisasContext *ctx)
+{
+    if (unlikely(!ctx->spe_enabled)) {
+        RET_EXCP(ctx, EXCP_NO_SPE, 0);
+        return;
+    }
+    gen_op_load_crf_T0(ctx->opcode & 0x7);
+    gen_op_load_gpr64_T0(rA(ctx->opcode));
+    gen_op_load_gpr64_T1(rB(ctx->opcode));
+    gen_op_evsel();
+    gen_op_store_T0_gpr64(rD(ctx->opcode));
+}
+
+GEN_HANDLER(evsel0, 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER(evsel1, 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER(evsel2, 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+GEN_HANDLER(evsel3, 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE)
+{
+    gen_evsel(ctx);
+}
+
+/* Loads and stores */
+#if defined(TARGET_PPC64)
+/* In that case, we already have 64-bit loads & stores,
+ * so spe_ldd is equivalent to ld and spe_stdd is equivalent to std.
+ */
+#if defined(CONFIG_USER_ONLY)
+#define gen_op_spe_ldd_raw gen_op_ld_raw
+#define gen_op_spe_ldd_64_raw gen_op_ld_64_raw
+#define gen_op_spe_ldd_le_raw gen_op_ld_le_raw
+#define gen_op_spe_ldd_le_64_raw gen_op_ld_le_64_raw
+#define gen_op_spe_stdd_raw gen_op_std_raw
+#define gen_op_spe_stdd_64_raw gen_op_std_64_raw
+#define gen_op_spe_stdd_le_raw gen_op_std_le_raw
+#define gen_op_spe_stdd_le_64_raw gen_op_std_le_64_raw
+#else /* defined(CONFIG_USER_ONLY) */
+#define gen_op_spe_ldd_kernel gen_op_ld_kernel
+#define gen_op_spe_ldd_64_kernel gen_op_ld_64_kernel
+#define gen_op_spe_ldd_le_kernel gen_op_ld_le_kernel
+#define gen_op_spe_ldd_le_64_kernel gen_op_ld_le_64_kernel
+#define gen_op_spe_ldd_user gen_op_ld_user
+#define gen_op_spe_ldd_64_user gen_op_ld_64_user
+#define gen_op_spe_ldd_le_user gen_op_ld_le_user
+#define gen_op_spe_ldd_le_64_user gen_op_ld_le_64_user
+#define gen_op_spe_stdd_kernel gen_op_std_kernel
+#define gen_op_spe_stdd_64_kernel gen_op_std_64_kernel
+#define gen_op_spe_stdd_le_kernel gen_op_std_le_kernel
+#define gen_op_spe_stdd_le_64_kernel gen_op_std_le_64_kernel
+#define gen_op_spe_stdd_user gen_op_std_user
+#define gen_op_spe_stdd_64_user gen_op_std_64_user
+#define gen_op_spe_stdd_le_user gen_op_std_le_user
+#define gen_op_spe_stdd_le_64_user gen_op_std_le_64_user
+#endif /* defined(CONFIG_USER_ONLY) */
+#endif /* defined(TARGET_PPC64) */
+GEN_SPEOP_LDST(dd, 3);
+GEN_SPEOP_LDST(dw, 3);
+GEN_SPEOP_LDST(dh, 3);
+GEN_SPEOP_LDST(whe, 2);
+GEN_SPEOP_LD(whou, 2);
+GEN_SPEOP_LD(whos, 2);
+GEN_SPEOP_ST(who, 2);
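The sh argument threaded through these instantiations scales the 5-bit immediate from the rB field by the element size, exactly as gen_addr_spe_imm_index does with simm << sh: 3 for the doubleword forms, 2 for word forms, 1 for halfword forms. A sketch of the resulting effective-address computation (gpr[] is a stand-in for the guest register file):

    #include <stdint.h>

    /* ra == 0 means "no base register", as in the rA(ctx->opcode) == 0
     * path above; uimm is the 5-bit offset field, sh the scale. */
    static uint32_t spe_effective_addr(const uint32_t *gpr, int ra,
                                       uint32_t uimm, int sh)
    {
        uint32_t base = (ra == 0) ? 0 : gpr[ra];
        return base + (uimm << sh);  /* evldd: uimm * 8, evlwhe: uimm * 4 */
    }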
+
+#if defined(TARGET_PPC64)
+/* In that case, spe_stwwo is equivalent to stw */
+#if defined(CONFIG_USER_ONLY)
+#define gen_op_spe_stwwo_raw gen_op_stw_raw
+#define gen_op_spe_stwwo_le_raw gen_op_stw_le_raw
+#define gen_op_spe_stwwo_64_raw gen_op_stw_64_raw
+#define gen_op_spe_stwwo_le_64_raw gen_op_stw_le_64_raw
+#else
+#define gen_op_spe_stwwo_user gen_op_stw_user
+#define gen_op_spe_stwwo_le_user gen_op_stw_le_user
+#define gen_op_spe_stwwo_64_user gen_op_stw_64_user
+#define gen_op_spe_stwwo_le_64_user gen_op_stw_le_64_user
+#define gen_op_spe_stwwo_kernel gen_op_stw_kernel
+#define gen_op_spe_stwwo_le_kernel gen_op_stw_le_kernel
+#define gen_op_spe_stwwo_64_kernel gen_op_stw_64_kernel
+#define gen_op_spe_stwwo_le_64_kernel gen_op_stw_le_64_kernel
+#endif
+#endif
+#define _GEN_OP_SPE_STWWE(suffix) \
+static inline void gen_op_spe_stwwe_##suffix (void) \
+{ \
+    gen_op_srli32_T1_64(); \
+    gen_op_spe_stwwo_##suffix(); \
+}
+#define _GEN_OP_SPE_STWWE_LE(suffix) \
+static inline void gen_op_spe_stwwe_le_##suffix (void) \
+{ \
+    gen_op_srli32_T1_64(); \
+    gen_op_spe_stwwo_le_##suffix(); \
+}
+#if defined(TARGET_PPC64)
+#define GEN_OP_SPE_STWWE(suffix) \
+_GEN_OP_SPE_STWWE(suffix); \
+_GEN_OP_SPE_STWWE_LE(suffix); \
+static inline void gen_op_spe_stwwe_64_##suffix (void) \
+{ \
+    gen_op_srli32_T1_64(); \
+    gen_op_spe_stwwo_64_##suffix(); \
+} \
+static inline void gen_op_spe_stwwe_le_64_##suffix (void) \
+{ \
+    gen_op_srli32_T1_64(); \
+    gen_op_spe_stwwo_le_64_##suffix(); \
+}
+#else
+#define GEN_OP_SPE_STWWE(suffix) \
+_GEN_OP_SPE_STWWE(suffix); \
+_GEN_OP_SPE_STWWE_LE(suffix)
+#endif
+#if defined(CONFIG_USER_ONLY)
+GEN_OP_SPE_STWWE(raw);
+#else /* defined(CONFIG_USER_ONLY) */
+GEN_OP_SPE_STWWE(kernel);
+GEN_OP_SPE_STWWE(user);
+#endif /* defined(CONFIG_USER_ONLY) */
+GEN_SPEOP_ST(wwe, 2);
+GEN_SPEOP_ST(wwo, 2);
+
+#define GEN_SPE_LDSPLAT(name, op, suffix) \
+static inline void gen_op_spe_l##name##_##suffix (void) \
+{ \
+    gen_op_##op##_##suffix(); \
+    gen_op_splatw_T1_64(); \
+}
+
+#define GEN_OP_SPE_LHE(suffix) \
+static inline void gen_op_spe_lhe_##suffix (void) \
+{ \
+    gen_op_spe_lh_##suffix(); \
+    gen_op_sli16_T1_64(); \
+}
+
+#define GEN_OP_SPE_LHX(suffix) \
+static inline void gen_op_spe_lhx_##suffix (void) \
+{ \
+    gen_op_spe_lh_##suffix(); \
+    gen_op_extsh_T1_64(); \
+}
+
+#if defined(CONFIG_USER_ONLY)
+GEN_OP_SPE_LHE(raw);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, raw);
+GEN_OP_SPE_LHE(le_raw);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_raw);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, raw);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_raw);
+GEN_OP_SPE_LHX(raw);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, raw);
+GEN_OP_SPE_LHX(le_raw);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_raw);
+#if defined(TARGET_PPC64)
+GEN_OP_SPE_LHE(64_raw);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_raw);
+GEN_OP_SPE_LHE(le_64_raw);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_raw);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_raw);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_raw);
+GEN_OP_SPE_LHX(64_raw);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_raw);
+GEN_OP_SPE_LHX(le_64_raw);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_raw);
+#endif
+#else
+GEN_OP_SPE_LHE(kernel);
+GEN_OP_SPE_LHE(user);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, kernel);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, user);
+GEN_OP_SPE_LHE(le_kernel);
+GEN_OP_SPE_LHE(le_user);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_kernel);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_user);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, kernel);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, user);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_kernel);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_user);
+GEN_OP_SPE_LHX(kernel);
+GEN_OP_SPE_LHX(user);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, kernel);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, user);
+GEN_OP_SPE_LHX(le_kernel);
+GEN_OP_SPE_LHX(le_user);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_kernel);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_user);
+#if defined(TARGET_PPC64)
+GEN_OP_SPE_LHE(64_kernel);
+GEN_OP_SPE_LHE(64_user);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_kernel);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_user);
+GEN_OP_SPE_LHE(le_64_kernel);
+GEN_OP_SPE_LHE(le_64_user);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_kernel);
+GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_user);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_kernel);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_user);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_kernel);
+GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_user);
+GEN_OP_SPE_LHX(64_kernel);
+GEN_OP_SPE_LHX(64_user);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_kernel);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_user);
+GEN_OP_SPE_LHX(le_64_kernel);
+GEN_OP_SPE_LHX(le_64_user);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_kernel);
+GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_user);
+#endif
+#endif
+GEN_SPEOP_LD(hhesplat, 1);
+GEN_SPEOP_LD(hhousplat, 1);
+GEN_SPEOP_LD(hhossplat, 1);
+GEN_SPEOP_LD(wwsplat, 2);
+GEN_SPEOP_LD(whsplat, 2);
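The splat loads are built by composition: spe_lh fetches one halfword into T1, the optional sli16/extsh step positions or sign-extends it, and splatw replicates the low word into both halves of the 64-bit result. A sketch of the values this should produce for one loaded halfword h (semantics inferred from the helper names, not spelled out by the patch):

    #include <stdint.h>

    static uint64_t splatw(uint32_t w)
    {
        return ((uint64_t)w << 32) | w;  /* gen_op_splatw_T1_64 analogue */
    }

    /* evlhhesplat: halfword shifted into the upper half of each word */
    static uint64_t sketch_evlhhesplat(uint16_t h)
    {
        return splatw((uint32_t)h << 16);
    }

    /* evlhhousplat: zero-extended; evlhhossplat: sign-extended */
    static uint64_t sketch_evlhhousplat(uint16_t h) { return splatw(h); }
    static uint64_t sketch_evlhhossplat(uint16_t h)
    {
        return splatw((uint32_t)(int32_t)(int16_t)h);
    }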
+
+GEN_SPE(evlddx, evldd, 0x00, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evldwx, evldw, 0x01, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evldhx, evldh, 0x02, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlhhesplatx, evlhhesplat, 0x04, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlhhousplatx, evlhhousplat, 0x06, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlhhossplatx, evlhhossplat, 0x07, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlwhex, evlwhe, 0x08, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlwhoux, evlwhou, 0x0A, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlwhosx, evlwhos, 0x0B, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlwwsplatx, evlwwsplat, 0x0C, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evlwhsplatx, evlwhsplat, 0x0E, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstddx, evstdd, 0x10, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstdwx, evstdw, 0x11, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstdhx, evstdh, 0x12, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstwhex, evstwhe, 0x18, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstwhox, evstwho, 0x1A, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstwwex, evstwwe, 0x1C, 0x0C, 0x00000000, PPC_SPE); //
+GEN_SPE(evstwwox, evstwwo, 0x1E, 0x0C, 0x00000000, PPC_SPE); //
+
+/* Multiply and add - TODO */
+#if 0
+GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0x00000000, PPC_SPE);
+
+GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0x00000000, PPC_SPE);
+
+GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, PPC_SPE);
+GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, PPC_SPE);
+GEN_SPE(evmra, speundef, 0x07, 0x13, 0x0000F800, PPC_SPE);
+
+GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumian, evmhogsmian, 0x16, 0x16, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0x00000000, PPC_SPE);
+#endif
+
+/*** SPE floating-point extension ***/
+#define GEN_SPEFPUOP_CONV(name) \
+static inline void gen_##name (DisasContext *ctx) \
+{ \
+    gen_op_load_gpr64_T0(rB(ctx->opcode)); \
+    gen_op_##name(); \
+    gen_op_store_T0_gpr64(rD(ctx->opcode)); \
+}
+
+/* Single precision floating-point vector operations */
+/* Arithmetic */
+GEN_SPEOP_ARITH2(evfsadd);
+GEN_SPEOP_ARITH2(evfssub);
+GEN_SPEOP_ARITH2(evfsmul);
+GEN_SPEOP_ARITH2(evfsdiv);
+GEN_SPEOP_ARITH1(evfsabs);
+GEN_SPEOP_ARITH1(evfsnabs);
+GEN_SPEOP_ARITH1(evfsneg);
+/* Conversion */
+GEN_SPEFPUOP_CONV(evfscfui);
+GEN_SPEFPUOP_CONV(evfscfsi);
+GEN_SPEFPUOP_CONV(evfscfuf);
+GEN_SPEFPUOP_CONV(evfscfsf);
+GEN_SPEFPUOP_CONV(evfsctui);
+GEN_SPEFPUOP_CONV(evfsctsi);
+GEN_SPEFPUOP_CONV(evfsctuf);
+GEN_SPEFPUOP_CONV(evfsctsf);
+GEN_SPEFPUOP_CONV(evfsctuiz);
+GEN_SPEFPUOP_CONV(evfsctsiz);
+/* Comparison */
+GEN_SPEOP_COMP(evfscmpgt);
+GEN_SPEOP_COMP(evfscmplt);
+GEN_SPEOP_COMP(evfscmpeq);
+GEN_SPEOP_COMP(evfststgt);
+GEN_SPEOP_COMP(evfststlt);
+GEN_SPEOP_COMP(evfststeq);
+
+/* Opcode definitions */
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, PPC_SPEFPU); //
+
+/* Single precision floating-point operations */
+/* Arithmetic */
+GEN_SPEOP_ARITH2(efsadd);
+GEN_SPEOP_ARITH2(efssub);
+GEN_SPEOP_ARITH2(efsmul);
+GEN_SPEOP_ARITH2(efsdiv);
+GEN_SPEOP_ARITH1(efsabs);
+GEN_SPEOP_ARITH1(efsnabs);
+GEN_SPEOP_ARITH1(efsneg);
+/* Conversion */
+GEN_SPEFPUOP_CONV(efscfui);
+GEN_SPEFPUOP_CONV(efscfsi);
+GEN_SPEFPUOP_CONV(efscfuf);
+GEN_SPEFPUOP_CONV(efscfsf);
+GEN_SPEFPUOP_CONV(efsctui);
+GEN_SPEFPUOP_CONV(efsctsi);
+GEN_SPEFPUOP_CONV(efsctuf);
+GEN_SPEFPUOP_CONV(efsctsf);
+GEN_SPEFPUOP_CONV(efsctuiz);
+GEN_SPEFPUOP_CONV(efsctsiz);
+GEN_SPEFPUOP_CONV(efscfd);
+/* Comparison */
+GEN_SPEOP_COMP(efscmpgt);
+GEN_SPEOP_COMP(efscmplt);
+GEN_SPEOP_COMP(efscmpeq);
+GEN_SPEOP_COMP(efststgt);
+GEN_SPEOP_COMP(efststlt);
+GEN_SPEOP_COMP(efststeq);
+
+/* Opcode definitions */
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, PPC_SPEFPU); //
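The evfs* vector forms treat a 64-bit GPR as two packed IEEE-754 single-precision values, one per 32-bit half, while the efs* scalar forms only use the low word. A sketch of the packed layout and the element-wise add behind evfsadd (illustrative semantics only; rounding-mode selection and the SPEFSCR status updates the real helpers must perform are omitted):

    #include <stdint.h>
    #include <string.h>

    static float word_as_float(uint32_t w)
    {
        float f;
        memcpy(&f, &w, sizeof(f));  /* reinterpret the 32-bit pattern */
        return f;
    }

    static uint32_t float_as_word(float f)
    {
        uint32_t w;
        memcpy(&w, &f, sizeof(w));
        return w;
    }

    static uint64_t sketch_evfsadd(uint64_t a, uint64_t b)
    {
        float hi = word_as_float((uint32_t)(a >> 32))
                 + word_as_float((uint32_t)(b >> 32));
        float lo = word_as_float((uint32_t)a) + word_as_float((uint32_t)b);
        return ((uint64_t)float_as_word(hi) << 32) | float_as_word(lo);
    }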
+
+/* Double precision floating-point operations */
+/* Arithmetic */
+GEN_SPEOP_ARITH2(efdadd);
+GEN_SPEOP_ARITH2(efdsub);
+GEN_SPEOP_ARITH2(efdmul);
+GEN_SPEOP_ARITH2(efddiv);
+GEN_SPEOP_ARITH1(efdabs);
+GEN_SPEOP_ARITH1(efdnabs);
+GEN_SPEOP_ARITH1(efdneg);
+/* Conversion */
+GEN_SPEFPUOP_CONV(efdcfui);
+GEN_SPEFPUOP_CONV(efdcfsi);
+GEN_SPEFPUOP_CONV(efdcfuf);
+GEN_SPEFPUOP_CONV(efdcfsf);
+GEN_SPEFPUOP_CONV(efdctui);
+GEN_SPEFPUOP_CONV(efdctsi);
+GEN_SPEFPUOP_CONV(efdctuf);
+GEN_SPEFPUOP_CONV(efdctsf);
+GEN_SPEFPUOP_CONV(efdctuiz);
+GEN_SPEFPUOP_CONV(efdctsiz);
+GEN_SPEFPUOP_CONV(efdcfs);
+GEN_SPEFPUOP_CONV(efdcfuid);
+GEN_SPEFPUOP_CONV(efdcfsid);
+GEN_SPEFPUOP_CONV(efdctuidz);
+GEN_SPEFPUOP_CONV(efdctsidz);
+/* Comparison */
+GEN_SPEOP_COMP(efdcmpgt);
+GEN_SPEOP_COMP(efdcmplt);
+GEN_SPEOP_COMP(efdcmpeq);
+GEN_SPEOP_COMP(efdtstgt);
+GEN_SPEOP_COMP(efdtstlt);
+GEN_SPEOP_COMP(efdtsteq);
+
+/* Opcode definitions */
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, PPC_SPEFPU); //
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, PPC_SPEFPU); //
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, PPC_SPEFPU); //
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, PPC_SPEFPU); //
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, PPC_SPEFPU); //
+#endif
+
 /* End opcode list */
 GEN_OPCODE_MARK(end);
 
@@ -4604,9 +5415,9 @@ void cpu_dump_statistics (CPUState *env, FILE*f,
 }
 
 /*****************************************************************************/
-static inline int
-gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
-                                int search_pc)
+static inline int gen_intermediate_code_internal (CPUState *env,
+                                                  TranslationBlock *tb,
+                                                  int search_pc)
 {
     DisasContext ctx, *ctxp = &ctx;
     opc_handler_t **table, *handler;
@@ -4639,6 +5450,9 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
     ctx.sf_mode = msr_sf;
 #endif
     ctx.fpu_enabled = msr_fp;
+#if defined(TARGET_PPCSPE)
+    ctx.spe_enabled = msr_spe;
+#endif
     ctx.singlestep_enabled = env->singlestep_enabled;
 #if defined (DO_SINGLE_STEP) && 0
     /* Single step trace mode */
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index d4159fa..1778ef1 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -30,7 +30,7 @@ struct ppc_def_t {
     const unsigned char *name;
     uint32_t pvr;
     uint32_t pvr_mask;
-    uint32_t insns_flags;
+    uint64_t insns_flags;
     uint32_t flags;
     uint64_t msr_mask;
 };
@@ -2424,7 +2424,8 @@ static int create_ppc_opcodes (CPUPPCState *env, ppc_def_t *def)
 
     fill_new_table(env->opcodes, 0x40);
 #if defined(PPC_DUMP_CPU)
-    printf("* PowerPC instructions for PVR %08x: %s flags %08x %08x\n",
+    printf("* PowerPC instructions for PVR %08x: %s flags %016" PRIx64
+           " %08x\n",
            def->pvr, def->name, def->insns_flags, def->flags);
 #endif
     if (&opc_start < &opc_end) {