author     Peter Maydell <peter.maydell@linaro.org>   2020-08-24 09:35:21 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-08-24 09:35:21 +0100
commit     dd8014e4e904e895435aae9f11a686f072762782 (patch)
tree       ea1f526128f3d88a92f90cf8833b3adf9c8ff828 /target/ppc
parent     8367a77c4d3f6e1e60890f5510304feb2c621611 (diff)
parent     3110f0ee19ccdb50adff3dfa1321039f69efddcd (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.2-20200818' into staging
ppc patch queue 2020-08-18

Here's my first pull request for qemu-5.2, which has quite a few
accumulated things.  Highlights are:

 * Preliminary support for POWER10 (Power ISA 3.1) instruction emulation
 * Add documentation on the (very confusing) pseries NUMA configuration
 * Fix some bugs handling edge cases with XICS, XIVE and kernel_irqchip
 * Fix icount for a number of POWER registers
 * Many cleanups to error handling in XIVE code
 * Validate size of -prom-env data

# gpg: Signature made Tue 18 Aug 2020 05:18:36 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.2-20200818: (40 commits)
  spapr/xive: Use xive_source_esb_len()
  nvram: Exit QEMU if NVRAM cannot contain all -prom-env data
  spapr/xive: Simplify error handling of kvmppc_xive_cpu_synchronize_state()
  ppc/xive: Simplify error handling in xive_tctx_realize()
  spapr/xive: Simplify error handling in kvmppc_xive_connect()
  ppc/xive: Fix error handling in vmstate_xive_tctx_*() callbacks
  spapr/xive: Fix error handling in kvmppc_xive_post_load()
  spapr/kvm: Fix error handling in kvmppc_xive_pre_save()
  spapr/xive: Rework error handling of kvmppc_xive_set_source_config()
  spapr/xive: Rework error handling in kvmppc_xive_get_queues()
  spapr/xive: Rework error handling of kvmppc_xive_[gs]et_queue_config()
  spapr/xive: Rework error handling of kvmppc_xive_cpu_[gs]et_state()
  spapr/xive: Rework error handling of kvmppc_xive_mmap()
  spapr/xive: Rework error handling of kvmppc_xive_source_reset()
  spapr/xive: Rework error handling of kvmppc_xive_cpu_connect()
  spapr: Simplify error handling in spapr_phb_realize()
  spapr/xive: Convert KVM device fd checks to assert()
  ppc/xive: Introduce dedicated kvm_irqchip_in_kernel() wrappers
  ppc/xive: Rework setup of XiveSource::esb_mmio
  target/ppc: Integrate icount to purr, vtb, and tbu40
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/ppc')
-rw-r--r--  target/ppc/cpu.h                       4
-rw-r--r--  target/ppc/helper.h                    5
-rw-r--r--  target/ppc/int_helper.c               48
-rw-r--r--  target/ppc/translate.c                40
-rw-r--r--  target/ppc/translate/spe-impl.c.inc  101
-rw-r--r--  target/ppc/translate/vmx-impl.c.inc   11
-rw-r--r--  target/ppc/translate/vmx-ops.c.inc    10
-rw-r--r--  target/ppc/translate_init.c.inc       32
8 files changed, 197 insertions, 54 deletions
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index e7d382a..3c4e1b3 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -2191,6 +2191,8 @@ enum {
PPC2_PM_ISA206 = 0x0000000000040000ULL,
/* POWER ISA 3.0 */
PPC2_ISA300 = 0x0000000000080000ULL,
+ /* POWER ISA 3.1 */
+ PPC2_ISA310 = 0x0000000000100000ULL,
#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
@@ -2199,7 +2201,7 @@ enum {
PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
- PPC2_ISA300)
+ PPC2_ISA300 | PPC2_ISA310)
};
/*****************************************************************************/
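
[Note] The new PPC2_ISA310 bit extends the insns_flags2 feature mask: each CPU model advertises the ISA levels it implements, and the decode tables reject an instruction whose flag the model does not set (POWER10 gains the bit at the end of this series). A minimal standalone sketch of that kind of mask test, in plain C — the struct and function names below are illustrative, not QEMU's actual types:

    #include <stdbool.h>
    #include <stdint.h>

    #define PPC2_ISA300 0x0000000000080000ULL   /* values copied from the hunk above */
    #define PPC2_ISA310 0x0000000000100000ULL

    /* Illustrative stand-in for a CPU model's second feature mask. */
    typedef struct {
        uint64_t insns_flags2;
    } ToyCpuModel;

    /* An ISA 3.1-only instruction is accepted only when the bit is set. */
    static bool accepts_isa310_insn(const ToyCpuModel *cpu)
    {
        return (cpu->insns_flags2 & PPC2_ISA310) != 0;
    }
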
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 90166cb..6a4dccf7 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -184,7 +184,10 @@ DEF_HELPER_3(vmulosw, void, avr, avr, avr)
DEF_HELPER_3(vmuloub, void, avr, avr, avr)
DEF_HELPER_3(vmulouh, void, avr, avr, avr)
DEF_HELPER_3(vmulouw, void, avr, avr, avr)
-DEF_HELPER_3(vmuluwm, void, avr, avr, avr)
+DEF_HELPER_3(vmulhsw, void, avr, avr, avr)
+DEF_HELPER_3(vmulhuw, void, avr, avr, avr)
+DEF_HELPER_3(vmulhsd, void, avr, avr, avr)
+DEF_HELPER_3(vmulhud, void, avr, avr, avr)
DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
DEF_HELPER_3(vsrv, void, avr, avr, avr)
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 43ebf1d..b45626f 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -523,19 +523,6 @@ void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
r->VsrD(0) = 0;
}
-#define VARITH_DO(name, op, element) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- r->element[i] = a->element[i] op b->element[i]; \
- } \
- }
-VARITH_DO(muluwm, *, u32)
-#undef VARITH_DO
-#undef VARITH
-
#define VARITHFP(suffix, func) \
void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
ppc_avr_t *b) \
@@ -1099,6 +1086,41 @@ VMUL(uw, u32, VsrW, VsrD, uint64_t)
#undef VMUL_DO_ODD
#undef VMUL
+void helper_vmulhsw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ r->s32[i] = (int32_t)(((int64_t)a->s32[i] * (int64_t)b->s32[i]) >> 32);
+ }
+}
+
+void helper_vmulhuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ r->u32[i] = (uint32_t)(((uint64_t)a->u32[i] *
+ (uint64_t)b->u32[i]) >> 32);
+ }
+}
+
+void helper_vmulhsd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ uint64_t discard;
+
+ muls64(&discard, &r->u64[0], a->s64[0], b->s64[0]);
+ muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]);
+}
+
+void helper_vmulhud(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ uint64_t discard;
+
+ mulu64(&discard, &r->u64[0], a->u64[0], b->u64[0]);
+ mulu64(&discard, &r->u64[1], a->u64[1], b->u64[1]);
+}
+
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
{
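
[Note] The four helpers just added all compute, per element, the high half of a full-width product (muls64/mulu64 return the 128-bit product in two halves, and the low half is discarded). A standalone reference sketch of the same per-element semantics in plain C — the function names are made up for illustration, and the 64-bit cases assume a compiler with __int128:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int32_t mulh_sw(int32_t a, int32_t b)        /* one lane of vmulhsw */
    {
        return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
    }

    static uint32_t mulh_uw(uint32_t a, uint32_t b)     /* one lane of vmulhuw */
    {
        return (uint32_t)(((uint64_t)a * (uint64_t)b) >> 32);
    }

    static int64_t mulh_sd(int64_t a, int64_t b)        /* one lane of vmulhsd */
    {
        return (int64_t)(((__int128)a * b) >> 64);
    }

    static uint64_t mulh_ud(uint64_t a, uint64_t b)     /* one lane of vmulhud */
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        /* 0x80000000 * 0x80000000 = 0x4000000000000000: high word is 0x40000000 */
        printf("%08" PRIx32 "\n", mulh_uw(0x80000000u, 0x80000000u));
        /* -1 * 1 = -1: signed high word is all ones */
        printf("%08" PRIx32 "\n", (uint32_t)mulh_sw(-1, 1));
        /* (2^64 - 1) * 2 = 2^65 - 2: high doubleword is 1 */
        printf("%016" PRIx64 "\n", mulh_ud(UINT64_MAX, 2));
        /* INT64_MIN * 2 = -2^64: signed high doubleword is all ones */
        printf("%016" PRIx64 "\n", (uint64_t)mulh_sd(INT64_MIN, 2));
        return 0;
    }
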
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 04db0d8..fedb9b2 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -6971,7 +6971,47 @@ static void gen_dform3D(DisasContext *ctx)
return gen_invalid(ctx);
}
+#if defined(TARGET_PPC64)
+/* brd */
+static void gen_brd(DisasContext *ctx)
+{
+ tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+/* brw */
+static void gen_brw(DisasContext *ctx)
+{
+ tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);
+
+}
+
+/* brh */
+static void gen_brh(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(t0, 0x00ff00ff00ff00ffull);
+ tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
+ tcg_gen_and_i64(t2, t1, t0);
+ tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_shli_i64(t1, t1, 8);
+ tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+#endif
+
static opcode_t opcodes[] = {
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
+GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
+GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
+#endif
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
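
[Note] For reference, the byte-reverse semantics the brd/brw/brh translators above generate, restated as standalone C using the same tricks: brw is a full 64-bit byte swap followed by a 32-bit rotate, and brh swaps adjacent bytes with a 0x00ff00ff... mask instead of a per-byte loop. This is a sketch only, and it assumes a GCC/Clang-style __builtin_bswap64:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t brd(uint64_t rs)          /* byte-reverse doubleword */
    {
        return __builtin_bswap64(rs);
    }

    static uint64_t brw(uint64_t rs)          /* byte-reverse each word */
    {
        uint64_t t = __builtin_bswap64(rs);
        return (t << 32) | (t >> 32);         /* rotate left by 32 */
    }

    static uint64_t brh(uint64_t rs)          /* byte-reverse each halfword */
    {
        const uint64_t mask = 0x00ff00ff00ff00ffULL;
        return ((rs >> 8) & mask) | ((rs & mask) << 8);
    }

    int main(void)
    {
        uint64_t x = 0x0011223344556677ULL;
        printf("brd: %016" PRIx64 "\n", brd(x));  /* 7766554433221100 */
        printf("brw: %016" PRIx64 "\n", brw(x));  /* 3322110077665544 */
        printf("brh: %016" PRIx64 "\n", brh(x));  /* 1100332255447766 */
        return 0;
    }
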
diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc
index 36b4d56..2e6e799 100644
--- a/target/ppc/translate/spe-impl.c.inc
+++ b/target/ppc/translate/spe-impl.c.inc
@@ -349,14 +349,24 @@ static inline void gen_evmergelohi(DisasContext *ctx)
}
static inline void gen_evsplati(DisasContext *ctx)
{
- uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
+ uint64_t imm;
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
}
static inline void gen_evsplatfi(DisasContext *ctx)
{
- uint64_t imm = rA(ctx->opcode) << 27;
+ uint64_t imm;
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ imm = rA(ctx->opcode) << 27;
tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
@@ -389,21 +399,37 @@ static inline void gen_evsel(DisasContext *ctx)
static void gen_evsel0(DisasContext *ctx)
{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
gen_evsel(ctx);
}
static void gen_evsel1(DisasContext *ctx)
{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
gen_evsel(ctx);
}
static void gen_evsel2(DisasContext *ctx)
{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
gen_evsel(ctx);
}
static void gen_evsel3(DisasContext *ctx)
{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
gen_evsel(ctx);
}
@@ -518,6 +544,11 @@ static inline void gen_evmwsmia(DisasContext *ctx)
{
TCGv_i64 tmp;
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
gen_evmwsmi(ctx); /* rD := rA * rB */
tmp = tcg_temp_new_i64();
@@ -531,8 +562,13 @@ static inline void gen_evmwsmia(DisasContext *ctx)
static inline void gen_evmwsmiaa(DisasContext *ctx)
{
- TCGv_i64 acc = tcg_temp_new_i64();
- TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 acc;
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
gen_evmwsmi(ctx); /* rD := rA * rB */
@@ -892,8 +928,14 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_SPEFPUOP_CONV_32_64(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
- TCGv_i32 t1 = tcg_temp_new_i32(); \
+ TCGv_i64 t0; \
+ TCGv_i32 t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i32(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
gen_helper_##name(t1, cpu_env, t0); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
@@ -903,8 +945,14 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_SPEFPUOP_CONV_64_32(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
- TCGv_i32 t1 = tcg_temp_new_i32(); \
+ TCGv_i64 t0; \
+ TCGv_i32 t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
gen_helper_##name(t0, cpu_env, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
@@ -914,7 +962,12 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_SPEFPUOP_CONV_64_64(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i64 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
gen_helper_##name(t0, cpu_env, t0); \
gen_store_gpr64(rD(ctx->opcode), t0); \
@@ -923,13 +976,8 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_SPEFPUOP_ARITH2_32_32(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i32 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i32(); \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
gen_helper_##name(t0, cpu_env, t0, t1); \
@@ -958,13 +1006,8 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_SPEFPUOP_COMP_32(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i32 t0, t1; \
- if (unlikely(!ctx->spe_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_SPEU); \
- return; \
- } \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i32(); \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
\
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
@@ -1074,28 +1117,16 @@ GEN_SPEFPUOP_ARITH2_32_32(efsmul);
GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
static inline void gen_efsabs(DisasContext *ctx)
{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
(target_long)~0x80000000LL);
}
static inline void gen_efsnabs(DisasContext *ctx)
{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
0x80000000);
}
static inline void gen_efsneg(DisasContext *ctx)
{
- if (unlikely(!ctx->spe_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_SPEU);
- return;
- }
tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
0x80000000);
}
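
[Note] A recurring shape in this file: several handlers gain an MSR[SPE] gate that raises POWERPC_EXCP_SPEU up front, and in the macro-generated converters the TCG temporaries are now allocated only after that check, so the early-return path owns nothing. The same ordering rule in a runnable, plain-C analogy (nothing below is QEMU code):

    #include <stdlib.h>

    /* Check the enable condition before acquiring anything, so the
     * early-exit path holds no resources that could leak. */
    static int guarded_op(int facility_enabled)
    {
        if (!facility_enabled) {
            return -1;                /* nothing allocated yet: safe to bail out */
        }

        char *tmp = malloc(64);       /* acquire only after the check */
        if (!tmp) {
            return -1;
        }
        /* ... do the real work with tmp ... */
        free(tmp);
        return 0;
    }
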
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index de2fd13..92b9527 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -801,18 +801,27 @@ static void trans_vclzd(DisasContext *ctx)
GEN_VXFORM(vmuloub, 4, 0);
GEN_VXFORM(vmulouh, 4, 1);
GEN_VXFORM(vmulouw, 4, 2);
-GEN_VXFORM(vmuluwm, 4, 2);
+GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vmulosb, 4, 4);
GEN_VXFORM(vmulosh, 4, 5);
GEN_VXFORM(vmulosw, 4, 6);
+GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7);
GEN_VXFORM(vmuleub, 4, 8);
GEN_VXFORM(vmuleuh, 4, 9);
GEN_VXFORM(vmuleuw, 4, 10);
+GEN_VXFORM(vmulhuw, 4, 10);
+GEN_VXFORM(vmulhud, 4, 11);
+GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE,
+ vmulhuw, PPC_NONE, PPC2_ISA310);
GEN_VXFORM(vmulesb, 4, 12);
GEN_VXFORM(vmulesh, 4, 13);
GEN_VXFORM(vmulesw, 4, 14);
+GEN_VXFORM(vmulhsw, 4, 14);
+GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE,
+ vmulhsw, PPC_NONE, PPC2_ISA310);
+GEN_VXFORM(vmulhsd, 4, 15);
GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc
index 84e05fb..f3f4855 100644
--- a/target/ppc/translate/vmx-ops.c.inc
+++ b/target/ppc/translate/vmx-ops.c.inc
@@ -48,6 +48,9 @@ GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
PPC2_ISA300)
+#define GEN_VXFORM_310(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA310)
+
#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
@@ -104,12 +107,15 @@ GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
GEN_VXFORM(vmulosb, 4, 4),
GEN_VXFORM(vmulosh, 4, 5),
GEN_VXFORM_207(vmulosw, 4, 6),
+GEN_VXFORM_310(vmulld, 4, 7),
GEN_VXFORM(vmuleub, 4, 8),
GEN_VXFORM(vmuleuh, 4, 9),
-GEN_VXFORM_207(vmuleuw, 4, 10),
+GEN_VXFORM_DUAL(vmuleuw, vmulhuw, 4, 10, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_310(vmulhud, 4, 11),
GEN_VXFORM(vmulesb, 4, 12),
GEN_VXFORM(vmulesh, 4, 13),
-GEN_VXFORM_207(vmulesw, 4, 14),
+GEN_VXFORM_DUAL(vmulesw, vmulhsw, 4, 14, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_310(vmulhsd, 4, 15),
GEN_VXFORM(vslb, 2, 4),
GEN_VXFORM(vslh, 2, 5),
GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index 7e66822..230a062 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -284,12 +284,24 @@ static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
ATTRIBUTE_UNUSED
static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_stop_exception(ctx);
+ }
}
static void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_stop_exception(ctx);
+ }
}
/* HDECR */
@@ -319,17 +331,35 @@ static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
static void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_stop_exception(ctx);
+ }
}
static void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_stop_exception(ctx);
+ }
}
static void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_stop_exception(ctx);
+ }
}
#endif
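
[Note] The PURR, VTB and TBU40 accessors above all gain the same icount bracket; it is condensed once here with comments on intent. This is a sketch reusing the calls shown in the hunks, not a new QEMU helper, and the function name is made up:

    static void spr_read_time_derived(DisasContext *ctx, int gprn, int sprn)
    {
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* With icount, mark the access as I/O-like so the clock read
             * inside the helper is permitted and properly accounted. */
            gen_io_start();
        }
        gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* End the translation block right after the access so the rest
             * of the block does not run with stale icount state. */
            gen_stop_exception(ctx);
        }
    }
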
@@ -9201,7 +9231,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL;
+ PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
(1ull << MSR_TM) |