Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu-qom.h                |   9
-rw-r--r--  target/arm/cpu.c                    |  19
-rw-r--r--  target/arm/cpu64.c                  |   8
-rw-r--r--  target/arm/helper.c                 |  17
-rw-r--r--  target/arm/helper.h                 |  27
-rw-r--r--  target/arm/neon_helper.c            |  24
-rw-r--r--  target/arm/translate-a64.c          |  64
-rw-r--r--  target/arm/translate.c              | 256
-rw-r--r--  target/arm/translate.h              |   5
-rw-r--r--  target/arm/vec_helper.c             |  25
-rw-r--r--  target/riscv/cpu.c                  |  10
-rw-r--r--  target/riscv/cpu.h                  |   1
-rw-r--r--  target/riscv/cpu_helper.c           |  18
-rw-r--r--  target/s390x/cpu.c                  |  27
-rw-r--r--  target/s390x/cpu.h                  |   7
-rw-r--r--  target/s390x/cpu_features_def.inc.h |   1
-rw-r--r--  target/s390x/diag.c                 |  75
-rw-r--r--  target/s390x/gen-features.c         |   1
-rw-r--r--  target/s390x/helper.c               |   6
-rw-r--r--  target/s390x/ioinst.c               |  96
-rw-r--r--  target/s390x/kvm-stub.c             |   5
-rw-r--r--  target/s390x/kvm.c                  |  79
-rw-r--r--  target/s390x/kvm_s390x.h            |   3
-rw-r--r--  target/s390x/mmu_helper.c           |  14
24 files changed, 587 insertions(+), 210 deletions(-)
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index d95568b..56395b8 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -35,7 +35,14 @@ struct arm_boot_info;
#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
-typedef struct ARMCPUInfo ARMCPUInfo;
+typedef struct ARMCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} ARMCPUInfo;
+
+void arm_cpu_register(const ARMCPUInfo *info);
+void aarch64_cpu_register(const ARMCPUInfo *info);
/**
* ARMCPUClass:
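For illustration, a minimal standalone sketch of the registration-table pattern this header now exposes: an array of { name, initfn } records walked until a NULL-name sentinel. The types and register_type() helper below are stand-ins, not the QOM API.

    #include <stdio.h>

    typedef struct Obj Obj;            /* stand-in for Object */

    typedef struct CPUInfo {
        const char *name;
        void (*initfn)(Obj *obj);
    } CPUInfo;

    static void demo_initfn(Obj *obj)
    {
        (void)obj;                     /* per-model init would go here */
    }

    static void register_type(const CPUInfo *info)
    {
        printf("registering cpu type: %s\n", info->name);
    }

    static const CPUInfo cpus[] = {
        { .name = "demo-a", .initfn = demo_initfn },
        { .name = "demo-b", .initfn = demo_initfn },
        { .name = NULL },              /* sentinel terminates the walk */
    };

    int main(void)
    {
        for (const CPUInfo *info = cpus; info->name; info++) {
            register_type(info);
        }
        return 0;
    }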
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index a79f233..141d947 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -582,7 +582,8 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
CPUARMState *env = &cpu->env;
bool ret = false;
- /* ARMv7-M interrupt masking works differently than -A or -R.
+ /*
+ * ARMv7-M interrupt masking works differently than -A or -R.
* There is no FIQ/IRQ distinction. Instead of I and F bits
* masking FIQ and IRQ interrupts, an exception is taken only
* if it is higher priority than the current execution priority
@@ -1912,7 +1913,8 @@ static void arm1026_initfn(Object *obj)
static void arm1136_r2_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
- /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
+ /*
+ * What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
* older core than plain "arm1136". In particular this does not
* have the v6K features.
* These ID register values are correct for 1136 but may be wrong
@@ -2693,18 +2695,13 @@ static void arm_max_initfn(Object *obj)
#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
-struct ARMCPUInfo {
- const char *name;
- void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
-};
-
static const ARMCPUInfo arm_cpus[] = {
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
{ .name = "arm926", .initfn = arm926_initfn },
{ .name = "arm946", .initfn = arm946_initfn },
{ .name = "arm1026", .initfn = arm1026_initfn },
- /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
+ /*
+ * What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
* older core than plain "arm1136". In particular this does not
* have the v6K features.
*/
@@ -2864,7 +2861,7 @@ static void cpu_register_class_init(ObjectClass *oc, void *data)
acc->info = data;
}
-static void cpu_register(const ARMCPUInfo *info)
+void arm_cpu_register(const ARMCPUInfo *info)
{
TypeInfo type_info = {
.parent = TYPE_ARM_CPU,
@@ -2905,7 +2902,7 @@ static void arm_cpu_register_types(void)
type_register_static(&idau_interface_type_info);
while (info->name) {
- cpu_register(info);
+ arm_cpu_register(info);
info++;
}
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 95d0c8c..74afc28 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -737,12 +737,6 @@ static void aarch64_max_initfn(Object *obj)
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
}
-struct ARMCPUInfo {
- const char *name;
- void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
-};
-
static const ARMCPUInfo aarch64_cpus[] = {
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
@@ -825,7 +819,7 @@ static void cpu_register_class_init(ObjectClass *oc, void *data)
acc->info = data;
}
-static void aarch64_cpu_register(const ARMCPUInfo *info)
+void aarch64_cpu_register(const ARMCPUInfo *info)
{
TypeInfo type_info = {
.parent = TYPE_AARCH64_CPU,
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7e9ea5d..dfefb9b 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3442,6 +3442,7 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
return CP_ACCESS_OK;
}
+#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
@@ -3602,9 +3603,11 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
}
return par64;
}
+#endif /* CONFIG_TCG */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
+#ifdef CONFIG_TCG
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t par64;
ARMMMUIdx mmu_idx;
@@ -3664,17 +3667,26 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
par64 = do_ats_write(env, value, access_type, mmu_idx);
A32_BANKED_CURRENT_REG_SET(env, par, par64);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+#ifdef CONFIG_TCG
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t par64;
par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
A32_BANKED_CURRENT_REG_SET(env, par, par64);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3689,6 +3701,7 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+#ifdef CONFIG_TCG
MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
ARMMMUIdx mmu_idx;
int secure = arm_is_secure_below_el3(env);
@@ -3728,6 +3741,10 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
}
env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
}
#endif
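The hunks above compile the TCG-only address-translation paths out of accelerator-only builds and make any stray call fatal. A minimal standalone sketch of that gating pattern, with abort() standing in for g_assert_not_reached() and CONFIG_EMU as a stand-in config macro:

    #include <stdio.h>
    #include <stdlib.h>

    #define CONFIG_EMU 1   /* remove to build the accelerator-only path */

    static void ats_write_demo(unsigned long value)
    {
    #ifdef CONFIG_EMU
        printf("emulating address translation for %#lx\n", value);
    #else
        /* Handled by hardware accelerator; must never be reached. */
        abort();
    #endif
    }

    int main(void)
    {
        ats_write_demo(0x1000UL);
        return 0;
    }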
diff --git a/target/arm/helper.h b/target/arm/helper.h
index f37b867..5817626 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -275,19 +275,6 @@ DEF_HELPER_2(neon_hsub_u16, i32, i32, i32)
DEF_HELPER_2(neon_hsub_s32, s32, s32, s32)
DEF_HELPER_2(neon_hsub_u32, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_u8, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_s8, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_u16, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_s16, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_u32, i32, i32, i32)
-DEF_HELPER_2(neon_cgt_s32, i32, i32, i32)
-DEF_HELPER_2(neon_cge_u8, i32, i32, i32)
-DEF_HELPER_2(neon_cge_s8, i32, i32, i32)
-DEF_HELPER_2(neon_cge_u16, i32, i32, i32)
-DEF_HELPER_2(neon_cge_s16, i32, i32, i32)
-DEF_HELPER_2(neon_cge_u32, i32, i32, i32)
-DEF_HELPER_2(neon_cge_s32, i32, i32, i32)
-
DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
@@ -347,9 +334,6 @@ DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
-DEF_HELPER_2(neon_ceq_u8, i32, i32, i32)
-DEF_HELPER_2(neon_ceq_u16, i32, i32, i32)
-DEF_HELPER_2(neon_ceq_u32, i32, i32, i32)
DEF_HELPER_1(neon_clz_u8, i32, i32)
DEF_HELPER_1(neon_clz_u16, i32, i32)
@@ -686,6 +670,17 @@ DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
index c7a8438..448be93 100644
--- a/target/arm/neon_helper.c
+++ b/target/arm/neon_helper.c
@@ -562,24 +562,6 @@ uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
return dest;
}
-#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
-NEON_VOP(cgt_s8, neon_s8, 4)
-NEON_VOP(cgt_u8, neon_u8, 4)
-NEON_VOP(cgt_s16, neon_s16, 2)
-NEON_VOP(cgt_u16, neon_u16, 2)
-NEON_VOP(cgt_s32, neon_s32, 1)
-NEON_VOP(cgt_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
-NEON_VOP(cge_s8, neon_s8, 4)
-NEON_VOP(cge_u8, neon_u8, 4)
-NEON_VOP(cge_s16, neon_s16, 2)
-NEON_VOP(cge_u16, neon_u16, 2)
-NEON_VOP(cge_s32, neon_s32, 1)
-NEON_VOP(cge_u32, neon_u32, 1)
-#undef NEON_FN
-
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
@@ -1135,12 +1117,6 @@ NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN
-#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
-NEON_VOP(ceq_u8, neon_u8, 4)
-NEON_VOP(ceq_u16, neon_u16, 2)
-NEON_VOP(ceq_u32, neon_u32, 1)
-#undef NEON_FN
-
/* Count Leading Sign/Zero Bits. */
static inline int do_clz8(uint8_t x)
{
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 7580e46..efb1c4a 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -594,6 +594,14 @@ static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
is_q ? 16 : 8, vec_full_reg_size(s));
}
+/* Expand a 2-operand AdvSIMD vector operation using an op descriptor. */
+static void gen_gvec_op2(DisasContext *s, bool is_q, int rd,
+ int rn, const GVecGen2 *gvec_op)
+{
+ tcg_gen_gvec_2(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ is_q ? 16 : 8, vec_full_reg_size(s), gvec_op);
+}
+
/* Expand a 2-operand + immediate AdvSIMD vector operation using
* an op descriptor.
*/
@@ -12366,6 +12374,15 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0x8: /* CMGT, CMGE */
+ gen_gvec_op2(s, is_q, rd, rn, u ? &cge0_op[size] : &cgt0_op[size]);
+ return;
+ case 0x9: /* CMEQ, CMLE */
+ gen_gvec_op2(s, is_q, rd, rn, u ? &cle0_op[size] : &ceq0_op[size]);
+ return;
+ case 0xa: /* CMLT */
+ gen_gvec_op2(s, is_q, rd, rn, &clt0_op[size]);
+ return;
case 0xb:
if (u) { /* ABS, NEG */
gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
@@ -12403,29 +12420,12 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
TCGv_i32 tcg_op = tcg_temp_new_i32();
TCGv_i32 tcg_res = tcg_temp_new_i32();
- TCGCond cond;
read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
if (size == 2) {
/* Special cases for 32 bit elements */
switch (opcode) {
- case 0xa: /* CMLT */
- /* 32 bit integer comparison against zero, result is
- * test ? (2^32 - 1) : 0. We implement via setcond(test)
- * and inverting.
- */
- cond = TCG_COND_LT;
- do_cmop:
- tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
- tcg_gen_neg_i32(tcg_res, tcg_res);
- break;
- case 0x8: /* CMGT, CMGE */
- cond = u ? TCG_COND_GE : TCG_COND_GT;
- goto do_cmop;
- case 0x9: /* CMEQ, CMLE */
- cond = u ? TCG_COND_LE : TCG_COND_EQ;
- goto do_cmop;
case 0x4: /* CLS */
if (u) {
tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
@@ -12522,36 +12522,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
genfn(tcg_res, cpu_env, tcg_op);
break;
}
- case 0x8: /* CMGT, CMGE */
- case 0x9: /* CMEQ, CMLE */
- case 0xa: /* CMLT */
- {
- static NeonGenTwoOpFn * const fns[3][2] = {
- { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
- { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
- { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
- };
- NeonGenTwoOpFn *genfn;
- int comp;
- bool reverse;
- TCGv_i32 tcg_zero = tcg_const_i32(0);
-
- /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
- comp = (opcode - 0x8) * 2 + u;
- /* ...but LE, LT are implemented as reverse GE, GT */
- reverse = (comp > 2);
- if (reverse) {
- comp = 4 - comp;
- }
- genfn = fns[comp][size];
- if (reverse) {
- genfn(tcg_res, tcg_zero, tcg_op);
- } else {
- genfn(tcg_res, tcg_op, tcg_zero);
- }
- tcg_temp_free_i32(tcg_zero);
- break;
- }
case 0x4: /* CLS, CLZ */
if (u) {
if (size == 0) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 9f9f4e1..d4ad202 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3917,6 +3917,205 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
return 1;
}
+static void gen_ceq0_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_setcondi_i32(TCG_COND_EQ, d, a, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+static void gen_ceq0_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_setcondi_i64(TCG_COND_EQ, d, a, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_ceq0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
+{
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d);
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, d, a, zero);
+ tcg_temp_free_vec(zero);
+}
+
+static const TCGOpcode vecop_list_cmp[] = {
+ INDEX_op_cmp_vec, 0
+};
+
+const GVecGen2 ceq0_op[4] = {
+ { .fno = gen_helper_gvec_ceq0_b,
+ .fniv = gen_ceq0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_8 },
+ { .fno = gen_helper_gvec_ceq0_h,
+ .fniv = gen_ceq0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_16 },
+ { .fni4 = gen_ceq0_i32,
+ .fniv = gen_ceq0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_32 },
+ { .fni8 = gen_ceq0_i64,
+ .fniv = gen_ceq0_vec,
+ .opt_opc = vecop_list_cmp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
+static void gen_cle0_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_setcondi_i32(TCG_COND_LE, d, a, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+static void gen_cle0_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_setcondi_i64(TCG_COND_LE, d, a, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_cle0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
+{
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d);
+ tcg_gen_cmp_vec(TCG_COND_LE, vece, d, a, zero);
+ tcg_temp_free_vec(zero);
+}
+
+const GVecGen2 cle0_op[4] = {
+ { .fno = gen_helper_gvec_cle0_b,
+ .fniv = gen_cle0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_8 },
+ { .fno = gen_helper_gvec_cle0_h,
+ .fniv = gen_cle0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_16 },
+ { .fni4 = gen_cle0_i32,
+ .fniv = gen_cle0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_32 },
+ { .fni8 = gen_cle0_i64,
+ .fniv = gen_cle0_vec,
+ .opt_opc = vecop_list_cmp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
+static void gen_cge0_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_setcondi_i32(TCG_COND_GE, d, a, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+static void gen_cge0_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_setcondi_i64(TCG_COND_GE, d, a, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_cge0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
+{
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d);
+ tcg_gen_cmp_vec(TCG_COND_GE, vece, d, a, zero);
+ tcg_temp_free_vec(zero);
+}
+
+const GVecGen2 cge0_op[4] = {
+ { .fno = gen_helper_gvec_cge0_b,
+ .fniv = gen_cge0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_8 },
+ { .fno = gen_helper_gvec_cge0_h,
+ .fniv = gen_cge0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_16 },
+ { .fni4 = gen_cge0_i32,
+ .fniv = gen_cge0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_32 },
+ { .fni8 = gen_cge0_i64,
+ .fniv = gen_cge0_vec,
+ .opt_opc = vecop_list_cmp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
+static void gen_clt0_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_setcondi_i32(TCG_COND_LT, d, a, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+static void gen_clt0_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_setcondi_i64(TCG_COND_LT, d, a, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_clt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
+{
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d);
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, d, a, zero);
+ tcg_temp_free_vec(zero);
+}
+
+const GVecGen2 clt0_op[4] = {
+ { .fno = gen_helper_gvec_clt0_b,
+ .fniv = gen_clt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_8 },
+ { .fno = gen_helper_gvec_clt0_h,
+ .fniv = gen_clt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_16 },
+ { .fni4 = gen_clt0_i32,
+ .fniv = gen_clt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_32 },
+ { .fni8 = gen_clt0_i64,
+ .fniv = gen_clt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
+static void gen_cgt0_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_setcondi_i32(TCG_COND_GT, d, a, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+static void gen_cgt0_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_setcondi_i64(TCG_COND_GT, d, a, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_cgt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
+{
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d);
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, d, a, zero);
+ tcg_temp_free_vec(zero);
+}
+
+const GVecGen2 cgt0_op[4] = {
+ { .fno = gen_helper_gvec_cgt0_b,
+ .fniv = gen_cgt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_8 },
+ { .fno = gen_helper_gvec_cgt0_h,
+ .fniv = gen_cgt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_16 },
+ { .fni4 = gen_cgt0_i32,
+ .fniv = gen_cgt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .vece = MO_32 },
+ { .fni8 = gen_cgt0_i64,
+ .fniv = gen_cgt0_vec,
+ .opt_opc = vecop_list_cmp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
tcg_gen_vec_sar8i_i64(a, a, shift);
@@ -6481,6 +6680,27 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
break;
+ case NEON_2RM_VCEQ0:
+ tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
+ vec_size, &ceq0_op[size]);
+ break;
+ case NEON_2RM_VCGT0:
+ tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
+ vec_size, &cgt0_op[size]);
+ break;
+ case NEON_2RM_VCLE0:
+ tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
+ vec_size, &cle0_op[size]);
+ break;
+ case NEON_2RM_VCGE0:
+ tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
+ vec_size, &cge0_op[size]);
+ break;
+ case NEON_2RM_VCLT0:
+ tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
+ vec_size, &clt0_op[size]);
+ break;
+
default:
elementwise:
for (pass = 0; pass < (q ? 4 : 2); pass++) {
@@ -6543,42 +6763,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
default: abort();
}
break;
- case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
- tmp2 = tcg_const_i32(0);
- switch(size) {
- case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
- default: abort();
- }
- tcg_temp_free_i32(tmp2);
- if (op == NEON_2RM_VCLE0) {
- tcg_gen_not_i32(tmp, tmp);
- }
- break;
- case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
- tmp2 = tcg_const_i32(0);
- switch(size) {
- case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
- default: abort();
- }
- tcg_temp_free_i32(tmp2);
- if (op == NEON_2RM_VCLT0) {
- tcg_gen_not_i32(tmp, tmp);
- }
- break;
- case NEON_2RM_VCEQ0:
- tmp2 = tcg_const_i32(0);
- switch(size) {
- case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
- default: abort();
- }
- tcg_temp_free_i32(tmp2);
- break;
case NEON_2RM_VCGT0_F:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
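For context, the GVecGen2 descriptors above bundle several expansions of one operation (out-of-line helper .fno, host-vector .fniv, 32/64-bit scalar .fni4/.fni8) and let the generator pick whichever the host supports. A standalone sketch of that dispatch idea, using stand-in names rather than the TCG API:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Gen2 {
        void (*fni4)(int32_t *d, const int32_t *a, size_t n); /* scalar loop */
        void (*fniv)(void);                                   /* vector variant */
    } Gen2;

    static void ceq0_i32(int32_t *d, const int32_t *a, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            d[i] = -(a[i] == 0);       /* all-ones mask on equality, else 0 */
        }
    }

    static const Gen2 ceq0 = { .fni4 = ceq0_i32 };  /* no vector variant here */

    static void expand_2(const Gen2 *op, int32_t *d, const int32_t *a, size_t n)
    {
        if (op->fniv) {
            op->fniv();                /* would emit host vector ops */
        } else {
            op->fni4(d, a, n);         /* portable fallback */
        }
    }

    int main(void)
    {
        int32_t a[4] = { 0, 5, 0, -1 }, d[4];
        expand_2(&ceq0, d, a, 4);
        for (int i = 0; i < 4; i++) {
            printf("%d ", d[i]);       /* prints: -1 0 -1 0 */
        }
        printf("\n");
        return 0;
    }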
diff --git a/target/arm/translate.h b/target/arm/translate.h
index d9ea0c9..98b319f 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -275,6 +275,11 @@ static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
uint64_t vfp_expand_imm(int size, uint8_t imm8);
/* Vector operations shared between ARM and AArch64. */
+extern const GVecGen2 ceq0_op[4];
+extern const GVecGen2 clt0_op[4];
+extern const GVecGen2 cgt0_op[4];
+extern const GVecGen2 cle0_op[4];
+extern const GVecGen2 cge0_op[4];
extern const GVecGen3 mla_op[4];
extern const GVecGen3 mls_op[4];
extern const GVecGen3 cmtst_op[4];
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 8017bd8..3d53418 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -1257,3 +1257,28 @@ void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
}
}
#endif
+
+#define DO_CMP0(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ *(TYPE *)(vd + i) = -(nn OP 0); \
+ } \
+ clear_tail(vd, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_CMP0(gvec_ceq0_b, int8_t, ==)
+DO_CMP0(gvec_clt0_b, int8_t, <)
+DO_CMP0(gvec_cle0_b, int8_t, <=)
+DO_CMP0(gvec_cgt0_b, int8_t, >)
+DO_CMP0(gvec_cge0_b, int8_t, >=)
+
+DO_CMP0(gvec_ceq0_h, int16_t, ==)
+DO_CMP0(gvec_clt0_h, int16_t, <)
+DO_CMP0(gvec_cle0_h, int16_t, <=)
+DO_CMP0(gvec_cgt0_h, int16_t, >)
+DO_CMP0(gvec_cge0_h, int16_t, >=)
+
+#undef DO_CMP0
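For illustration, roughly what DO_CMP0(gvec_ceq0_b, int8_t, ==) expands to, as a standalone program; simd_oprsz() is stubbed and clear_tail() omitted, so this is a sketch rather than the QEMU helper:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static size_t simd_oprsz(uint32_t desc)
    {
        return desc;                   /* stub: the real one decodes desc */
    }

    static void gvec_ceq0_b(void *vd, void *vn, uint32_t desc)
    {
        intptr_t i, opr_sz = simd_oprsz(desc);
        for (i = 0; i < opr_sz; i += sizeof(int8_t)) {
            int8_t nn = *((int8_t *)vn + i);
            /* -(cond) turns 1 into the all-ones element mask 0xff */
            *((int8_t *)vd + i) = -(nn == 0);
        }
    }

    int main(void)
    {
        int8_t n[8] = { 0, 1, 0, -5, 0, 127, 0, 0 };
        int8_t d[8];
        gvec_ceq0_b(d, n, sizeof(n));
        for (int i = 0; i < 8; i++) {
            printf("%02x ", (unsigned)(uint8_t)d[i]);  /* ff 00 ff 00 ff 00 ff ff */
        }
        printf("\n");
        return 0;
    }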
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 4e57823..059d71f 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -164,6 +164,15 @@ static void rv32imacu_nommu_cpu_init(Object *obj)
set_feature(env, RISCV_FEATURE_PMP);
}
+static void rv32imafcu_nommu_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVC | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+ set_resetvec(env, DEFAULT_RSTVEC);
+ set_feature(env, RISCV_FEATURE_PMP);
+}
+
#elif defined(TARGET_RISCV64)
static void riscv_base64_cpu_init(Object *obj)
@@ -610,6 +619,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
#if defined(TARGET_RISCV32)
DEFINE_CPU(TYPE_RISCV_CPU_BASE32, riscv_base32_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32imacu_nommu_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32imafcu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init),
/* Deprecated */
DEFINE_CPU(TYPE_RISCV_CPU_RV32IMACU_NOMMU, rv32imacu_nommu_cpu_init),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7d21add..d0e7f5b 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -36,6 +36,7 @@
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
+#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index d3ba9ef..bc80aa8 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -452,10 +452,11 @@ restart:
hwaddr pte_addr;
if (two_stage && first_stage) {
+ int vbase_prot;
hwaddr vbase;
/* Do the second stage translation on the base PTE address. */
- get_physical_address(env, &vbase, prot, base, access_type,
+ get_physical_address(env, &vbase, &vbase_prot, base, access_type,
mmu_idx, false, true);
pte_addr = vbase + idx * ptesize;
@@ -558,12 +559,7 @@ restart:
/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
target_ulong vpn = addr >> PGSHIFT;
- if (i == 0) {
- *physical = (ppn | (vpn & ((1L << (ptshift + widened)) - 1))) <<
- PGSHIFT;
- } else {
- *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
- }
+ *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
/* set permissions on the TLB entry */
if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
@@ -706,7 +702,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
#ifndef CONFIG_USER_ONLY
vaddr im_address;
hwaddr pa = 0;
- int prot;
+ int prot, prot2;
bool pmp_violation = false;
bool m_mode_two_stage = false;
bool hs_mode_two_stage = false;
@@ -756,13 +752,15 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Second stage lookup */
im_address = pa;
- ret = get_physical_address(env, &pa, &prot, im_address,
+ ret = get_physical_address(env, &pa, &prot2, im_address,
access_type, mmu_idx, false, true);
qemu_log_mask(CPU_LOG_MMU,
"%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
TARGET_FMT_plx " prot %d\n",
- __func__, im_address, ret, pa, prot);
+ __func__, im_address, ret, pa, prot2);
+
+ prot &= prot2;
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
(ret == TRANSLATE_SUCCESS) &&
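The prot &= prot2 above intersects the permissions of the two translation stages, so the resulting TLB entry is only as permissive as both stages allow. A standalone sketch of that combination, with stand-in flag values:

    #include <stdio.h>

    #define PROT_READ  1
    #define PROT_WRITE 2
    #define PROT_EXEC  4

    int main(void)
    {
        int prot  = PROT_READ | PROT_WRITE;  /* stage 1: guest VA -> guest PA */
        int prot2 = PROT_READ | PROT_EXEC;   /* stage 2: guest PA -> host PA */
        int eff = prot & prot2;              /* effective rights: read-only */

        printf("effective prot = %d (read=%d write=%d exec=%d)\n",
               eff, !!(eff & PROT_READ), !!(eff & PROT_WRITE),
               !!(eff & PROT_EXEC));
        return 0;
    }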
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 427a46e..f2ccf0a 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -37,6 +37,7 @@
#include "sysemu/hw_accel.h"
#include "hw/qdev-properties.h"
#ifndef CONFIG_USER_ONLY
+#include "hw/s390x/pv.h"
#include "hw/boards.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
@@ -76,16 +77,24 @@ static bool s390_cpu_has_work(CPUState *cs)
static void s390_cpu_load_normal(CPUState *s)
{
S390CPU *cpu = S390_CPU(s);
- uint64_t spsw = ldq_phys(s->as, 0);
-
- cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL;
- /*
- * Invert short psw indication, so SIE will report a specification
- * exception if it was not set.
- */
- cpu->env.psw.mask ^= PSW_MASK_SHORTPSW;
- cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR;
+ uint64_t spsw;
+ if (!s390_is_pv()) {
+ spsw = ldq_phys(s->as, 0);
+ cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL;
+ /*
+ * Invert short psw indication, so SIE will report a specification
+ * exception if it was not set.
+ */
+ cpu->env.psw.mask ^= PSW_MASK_SHORTPSW;
+ cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR;
+ } else {
+ /*
+ * Firmware requires us to set the load state before we set
+ * the CPU to the operating state on protected guests.
+ */
+ s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu);
+ }
s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
}
#endif
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 1d17709..0354275 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -823,7 +823,12 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);
-
+int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
+ int len, bool is_write);
+#define s390_cpu_pv_mem_read(cpu, offset, dest, len) \
+ s390_cpu_pv_mem_rw(cpu, offset, dest, len, false)
+#define s390_cpu_pv_mem_write(cpu, offset, dest, len) \
+ s390_cpu_pv_mem_rw(cpu, offset, dest, len, true)
/* sigp.c */
int s390_cpu_restart(S390CPU *cpu);
diff --git a/target/s390x/cpu_features_def.inc.h b/target/s390x/cpu_features_def.inc.h
index 31dff0d..60db283 100644
--- a/target/s390x/cpu_features_def.inc.h
+++ b/target/s390x/cpu_features_def.inc.h
@@ -107,6 +107,7 @@ DEF_FEAT(DEFLATE_BASE, "deflate-base", STFL, 151, "Deflate-conversion facility (
DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH, "vxpdeh", STFL, 152, "Vector-Packed-Decimal-Enhancement Facility")
DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)")
DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility")
+DEF_FEAT(UNPACK, "unpack", STFL, 161, "Unpack facility")
/* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility")
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
index 54e5670..1a48429 100644
--- a/target/s390x/diag.c
+++ b/target/s390x/diag.c
@@ -20,6 +20,8 @@
#include "sysemu/cpus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/pv.h"
+#include "kvm_s390x.h"
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
@@ -49,20 +51,13 @@ int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
return diag288_class->handle_timer(diag288, func, timeout);
}
-#define DIAG_308_RC_OK 0x0001
-#define DIAG_308_RC_NO_CONF 0x0102
-#define DIAG_308_RC_INVALID 0x0402
-
-#define DIAG308_RESET_MOD_CLR 0
-#define DIAG308_RESET_LOAD_NORM 1
-#define DIAG308_LOAD_CLEAR 3
-#define DIAG308_LOAD_NORMAL_DUMP 4
-#define DIAG308_SET 5
-#define DIAG308_STORE 6
-
static int diag308_parm_check(CPUS390XState *env, uint64_t r1, uint64_t addr,
uintptr_t ra, bool write)
{
+ /* Handled by the Ultravisor */
+ if (s390_is_pv()) {
+ return 0;
+ }
if ((r1 & 1) || (addr & ~TARGET_PAGE_MASK)) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return -1;
@@ -78,7 +73,9 @@ static int diag308_parm_check(CPUS390XState *env, uint64_t r1, uint64_t addr,
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
{
+ bool valid;
CPUState *cs = env_cpu(env);
+ S390CPU *cpu = S390_CPU(cs);
uint64_t addr = env->regs[r1];
uint64_t subcode = env->regs[r3];
IplParameterBlock *iplb;
@@ -93,6 +90,11 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
return;
}
+ if (subcode >= DIAG308_PV_SET && !s390_has_feat(S390_FEAT_UNPACK)) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
switch (subcode) {
case DIAG308_RESET_MOD_CLR:
s390_ipl_reset_request(cs, S390_RESET_MODIFIED_CLEAR);
@@ -105,19 +107,30 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
s390_ipl_reset_request(cs, S390_RESET_REIPL);
break;
case DIAG308_SET:
+ case DIAG308_PV_SET:
if (diag308_parm_check(env, r1, addr, ra, false)) {
return;
}
iplb = g_new0(IplParameterBlock, 1);
- cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
+ if (!s390_is_pv()) {
+ cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
+ } else {
+ s390_cpu_pv_mem_read(cpu, 0, iplb, sizeof(iplb->len));
+ }
+
if (!iplb_valid_len(iplb)) {
env->regs[r1 + 1] = DIAG_308_RC_INVALID;
goto out;
}
- cpu_physical_memory_read(addr, iplb, be32_to_cpu(iplb->len));
+ if (!s390_is_pv()) {
+ cpu_physical_memory_read(addr, iplb, be32_to_cpu(iplb->len));
+ } else {
+ s390_cpu_pv_mem_read(cpu, 0, iplb, be32_to_cpu(iplb->len));
+ }
- if (!iplb_valid(iplb)) {
+ valid = subcode == DIAG308_PV_SET ? iplb_valid_pv(iplb) : iplb_valid(iplb);
+ if (!valid) {
env->regs[r1 + 1] = DIAG_308_RC_INVALID;
goto out;
}
@@ -128,17 +141,43 @@ out:
g_free(iplb);
return;
case DIAG308_STORE:
+ case DIAG308_PV_STORE:
if (diag308_parm_check(env, r1, addr, ra, true)) {
return;
}
- iplb = s390_ipl_get_iplb();
- if (iplb) {
- cpu_physical_memory_write(addr, iplb, be32_to_cpu(iplb->len));
- env->regs[r1 + 1] = DIAG_308_RC_OK;
+ if (subcode == DIAG308_PV_STORE) {
+ iplb = s390_ipl_get_iplb_pv();
} else {
+ iplb = s390_ipl_get_iplb();
+ }
+ if (!iplb) {
env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
+ return;
}
+
+ if (!s390_is_pv()) {
+ cpu_physical_memory_write(addr, iplb, be32_to_cpu(iplb->len));
+ } else {
+ s390_cpu_pv_mem_write(cpu, 0, iplb, be32_to_cpu(iplb->len));
+ }
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
return;
+ case DIAG308_PV_START:
+ iplb = s390_ipl_get_iplb_pv();
+ if (!iplb) {
+ env->regs[r1 + 1] = DIAG_308_RC_NO_PV_CONF;
+ return;
+ }
+
+ if (kvm_s390_get_hpage_1m()) {
+ error_report("Protected VMs can currently not be backed with "
+ "huge pages");
+ env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
+ return;
+ }
+
+ s390_ipl_reset_request(cs, S390_RESET_PV);
+ break;
default:
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
break;
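The new DIAG 308 check rejects the protected-virtualization subcodes unless the unpack facility is present. A standalone sketch of that gating; the numeric subcode values and helpers below are stand-ins, not the s390x ABI:

    #include <stdbool.h>
    #include <stdio.h>

    enum { SUBCODE_SET = 5, SUBCODE_STORE = 6, SUBCODE_PV_SET = 8 };

    static bool has_unpack = false;    /* assume the facility is absent */

    static int handle_diag_308(unsigned subcode)
    {
        if (subcode >= SUBCODE_PV_SET && !has_unpack) {
            return -1;                 /* would raise a specification exception */
        }
        return 0;                      /* dispatch on subcode as usual */
    }

    int main(void)
    {
        printf("subcode 5 -> %d\n", handle_diag_308(SUBCODE_SET));
        printf("subcode 8 -> %d\n", handle_diag_308(SUBCODE_PV_SET));
        return 0;
    }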
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 6278845..8ddeebc 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -562,6 +562,7 @@ static uint16_t full_GEN15_GA1[] = {
S390_FEAT_GROUP_MSA_EXT_9,
S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
S390_FEAT_ETOKEN,
+ S390_FEAT_UNPACK,
};
/* Default features (in order of release)
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index ed72684..09f6040 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -25,6 +25,7 @@
#include "qemu/timer.h"
#include "qemu/qemu-print.h"
#include "hw/s390x/ioinst.h"
+#include "hw/s390x/pv.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#ifndef CONFIG_USER_ONLY
@@ -246,6 +247,11 @@ int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
hwaddr len = sizeof(*sa);
int i;
+ /* For PVMs, storing will occur when this CPU enters SIE again. */
+ if (s390_is_pv()) {
+ return 0;
+ }
+
sa = cpu_physical_memory_map(addr, &len, true);
if (!sa) {
return -EFAULT;
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
index 0e840cc..7a14c52 100644
--- a/target/s390x/ioinst.c
+++ b/target/s390x/ioinst.c
@@ -16,6 +16,25 @@
#include "hw/s390x/ioinst.h"
#include "trace.h"
#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/pv.h"
+
+/* All I/O instructions but chsc use the s format */
+static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb,
+ uint8_t *ar)
+{
+ /*
+ * Addresses for protected guests are all offsets into the
+ * satellite block, which holds the I/O control structures. Those
+ * control structures always start at offset 0 and are always
+ * aligned and accessible, so we can return 0 here and still pass
+ * the address checks that follow.
+ */
+ if (s390_is_pv()) {
+ *ar = 0;
+ return 0;
+ }
+ return decode_basedisp_s(env, ipb, ar);
+}
int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
int *schid)
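A standalone sketch of the address selection above: for protected guests every I/O operand is an offset into a control block starting at 0, so the "address" collapses to 0. decode_base_disp() below is a stand-in for decode_basedisp_s():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_protected = true;

    static uint64_t decode_base_disp(uint32_t ipb)
    {
        return ipb & 0xfff;            /* stub base+displacement decode */
    }

    static uint64_t get_io_operand_address(uint32_t ipb)
    {
        if (is_protected) {
            return 0;                  /* offset 0 into the satellite block */
        }
        return decode_base_disp(ipb);
    }

    int main(void)
    {
        printf("addr = %llu\n",
               (unsigned long long)get_io_operand_address(0x123));
        return 0;
    }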
@@ -114,12 +133,14 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
CPUS390XState *env = &cpu->env;
uint8_t ar;
- addr = decode_basedisp_s(env, ipb, &ar);
+ addr = get_address_from_regs(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
- if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, &schib, sizeof(schib));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
@@ -171,12 +192,14 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
CPUS390XState *env = &cpu->env;
uint8_t ar;
- addr = decode_basedisp_s(env, ipb, &ar);
+ addr = get_address_from_regs(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
}
- if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, &orig_orb, sizeof(orb));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
@@ -203,7 +226,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
CPUS390XState *env = &cpu->env;
uint8_t ar;
- addr = decode_basedisp_s(env, ipb, &ar);
+ addr = get_address_from_regs(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
@@ -212,14 +235,19 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
cc = css_do_stcrw(&crw);
/* 0 - crw stored, 1 - zeroes stored */
- if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &crw, sizeof(crw));
setcc(cpu, cc);
} else {
- if (cc == 0) {
- /* Write failed: requeue CRW since STCRW is suppressing */
- css_undo_stcrw(&crw);
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
+ setcc(cpu, cc);
+ } else {
+ if (cc == 0) {
+ /* Write failed: requeue CRW since STCRW is suppressing */
+ css_undo_stcrw(&crw);
+ }
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
}
- s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
@@ -234,7 +262,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
CPUS390XState *env = &cpu->env;
uint8_t ar;
- addr = decode_basedisp_s(env, ipb, &ar);
+ addr = get_address_from_regs(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return;
@@ -242,6 +270,13 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
/*
+ * The Ultravisor checks that schid bit 16 is one and bits 0-12
+ * are zero, and injects an operand exception itself.
+ *
+ * Hence we should never end up here.
+ */
+ g_assert(!s390_is_pv());
+ /*
* As operand exceptions have a lower priority than access exceptions,
* we check whether the memory area is writeable (injecting the
* access exception if it is not) first.
@@ -273,14 +308,17 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
}
}
if (cc != 3) {
- if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
- sizeof(schib)) != 0) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &schib, sizeof(schib));
+ } else if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+ sizeof(schib)) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
} else {
/* Access exceptions have a higher priority than cc3 */
- if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
+ if (!s390_is_pv() &&
+ s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
@@ -303,7 +341,7 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
return -EIO;
}
trace_ioinst_sch_id("tsch", cssid, ssid, schid);
- addr = decode_basedisp_s(env, ipb, &ar);
+ addr = get_address_from_regs(env, ipb, &ar);
if (addr & 3) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
return -EIO;
@@ -317,7 +355,9 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
}
/* 0 - status pending, 1 - not status pending, 3 - not operational */
if (cc != 3) {
- if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &irb, irb_len);
+ } else if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
@@ -325,7 +365,8 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
} else {
irb_len = sizeof(irb) - sizeof(irb.emw);
/* Access exceptions have a higher priority than cc3 */
- if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
+ if (!s390_is_pv() &&
+ s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
@@ -601,7 +642,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
ChscReq *req;
ChscResp *res;
- uint64_t addr;
+ uint64_t addr = 0;
int reg;
uint16_t len;
uint16_t command;
@@ -610,7 +651,9 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
trace_ioinst("chsc");
reg = (ipb >> 20) & 0x00f;
- addr = env->regs[reg];
+ if (!s390_is_pv()) {
+ addr = env->regs[reg];
+ }
/* Page boundary? */
if (addr & 0xfff) {
s390_program_interrupt(env, PGM_SPECIFICATION, ra);
@@ -621,7 +664,9 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
* present CHSC sub-handlers ... if we ever need more, we should take
* care of req->len here first.
*/
- if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, buf, sizeof(ChscReq));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
@@ -654,11 +699,16 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
break;
}
- if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
- be16_to_cpu(res->len))) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr + len, res, be16_to_cpu(res->len));
setcc(cpu, 0); /* Command execution complete */
} else {
- s390_cpu_virt_mem_handle_exc(cpu, ra);
+ if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+ be16_to_cpu(res->len))) {
+ setcc(cpu, 0); /* Command execution complete */
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
}
}
diff --git a/target/s390x/kvm-stub.c b/target/s390x/kvm-stub.c
index c4cd497..aa18501 100644
--- a/target/s390x/kvm-stub.c
+++ b/target/s390x/kvm-stub.c
@@ -39,6 +39,11 @@ int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
return 0;
}
+int kvm_s390_get_hpage_1m(void)
+{
+ return 0;
+}
+
int kvm_s390_get_ri(void)
{
return 0;
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 7f7ebab..69881a0 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -50,6 +50,7 @@
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
+#include "hw/s390x/pv.h"
#ifndef DEBUG_KVM
#define DEBUG_KVM 0
@@ -115,6 +116,8 @@
#define ICPT_CPU_STOP 0x28
#define ICPT_OPEREXC 0x2c
#define ICPT_IO 0x40
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_INSTR_NOTIFICATION 0x6c
#define NR_LOCAL_IRQS 32
/*
@@ -152,6 +155,7 @@ static int cap_ri;
static int cap_gs;
static int cap_hpage_1m;
static int cap_vcpu_resets;
+static int cap_protected;
static int active_cmma;
@@ -321,6 +325,11 @@ void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
cap_hpage_1m = 1;
}
+int kvm_s390_get_hpage_1m(void)
+{
+ return cap_hpage_1m;
+}
+
static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -344,6 +353,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
+ cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
|| !kvm_check_extension(s, KVM_CAP_S390_COW)) {
@@ -844,6 +854,30 @@ int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
return ret;
}
+int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
+ int len, bool is_write)
+{
+ struct kvm_s390_mem_op mem_op = {
+ .sida_offset = offset,
+ .size = len,
+ .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
+ : KVM_S390_MEMOP_SIDA_READ,
+ .buf = (uint64_t)hostbuf,
+ };
+ int ret;
+
+ if (!cap_mem_op || !cap_protected) {
+ return -ENOSYS;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+ if (ret < 0) {
+ error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
+ abort();
+ }
+ return ret;
+}
+
/*
* Legacy layout for s390:
* Older S390 KVM requires the topmost vma of the RAM to be
@@ -1199,12 +1233,27 @@ static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
sccb = env->regs[ipbh0 & 0xf];
code = env->regs[(ipbh0 & 0xf0) >> 4];
- r = sclp_service_call(env, sccb, code);
- if (r < 0) {
- kvm_s390_program_interrupt(cpu, -r);
- return;
+ switch (run->s390_sieic.icptcode) {
+ case ICPT_PV_INSTR_NOTIFICATION:
+ g_assert(s390_is_pv());
+ /* The notification intercepts are currently handled by KVM */
+ error_report("unexpected SCLP PV notification");
+ exit(1);
+ break;
+ case ICPT_PV_INSTR:
+ g_assert(s390_is_pv());
+ sclp_service_call_protected(env, sccb, code);
+ /* Setting the CC is done by the Ultravisor. */
+ break;
+ case ICPT_INSTRUCTION:
+ g_assert(!s390_is_pv());
+ r = sclp_service_call(env, sccb, code);
+ if (r < 0) {
+ kvm_s390_program_interrupt(cpu, -r);
+ return;
+ }
+ setcc(cpu, r);
}
- setcc(cpu, r);
}
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
@@ -1693,6 +1742,8 @@ static int handle_intercept(S390CPU *cpu)
(long)cs->kvm_run->psw_addr);
switch (icpt_code) {
case ICPT_INSTRUCTION:
+ case ICPT_PV_INSTR:
+ case ICPT_PV_INSTR_NOTIFICATION:
r = handle_instruction(cpu, run);
break;
case ICPT_PROGRAM:
@@ -1773,7 +1824,9 @@ static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
SysIB_322 sysib;
int del, i;
- if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
return;
}
/* Shift the stack of Extended Names to prepare for our own data */
@@ -1826,7 +1879,11 @@ static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
/* Insert UUID */
memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
- s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
+ } else {
+ s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+ }
}
static int handle_stsi(S390CPU *cpu)
@@ -2368,6 +2425,14 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
clear_bit(S390_FEAT_BPB, model->features);
}
+ /*
+ * If we have support for protected virtualization, indicate
+ * the protected virtualization IPL unpack facility.
+ */
+ if (cap_protected) {
+ set_bit(S390_FEAT_UNPACK, model->features);
+ }
+
/* We emulate a zPCI bus and AEN, therefore we don't need HW support */
set_bit(S390_FEAT_ZPCI, model->features);
set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
diff --git a/target/s390x/kvm_s390x.h b/target/s390x/kvm_s390x.h
index 0b21789..6ab17c8 100644
--- a/target/s390x/kvm_s390x.h
+++ b/target/s390x/kvm_s390x.h
@@ -19,10 +19,13 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
int len, bool is_write);
+int kvm_s390_mem_op_pv(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+ bool is_write);
void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
+int kvm_s390_get_hpage_1m(void);
int kvm_s390_get_ri(void);
int kvm_s390_get_gs(void);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 0be2f30..7d9f305 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -474,6 +474,20 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
return 0;
}
+int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
+ int len, bool is_write)
+{
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_mem_op_pv(cpu, offset, hostbuf, len, is_write);
+ } else {
+ /* Protected Virtualization is a KVM/Hardware only feature */
+ g_assert_not_reached();
+ }
+ return ret;
+}
+
/**
* s390_cpu_virt_mem_rw:
* @laddr: the logical start address