Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c                    1
-rw-r--r--  target/alpha/translate.c             30
-rw-r--r--  target/arm/cpu.c                     32
-rw-r--r--  target/arm/cpu.h                    120
-rw-r--r--  target/arm/helper.c                 338
-rw-r--r--  target/arm/machine.c                  7
-rw-r--r--  target/arm/op_helper.c                3
-rw-r--r--  target/arm/translate-a64.c           23
-rw-r--r--  target/arm/translate.c               35
-rw-r--r--  target/arm/translate.h                6
-rw-r--r--  target/hppa/cpu.c                     1
-rw-r--r--  target/hppa/translate.c               8
-rw-r--r--  target/i386/arch_memory_mapping.c    18
-rw-r--r--  target/i386/cpu.c                    25
-rw-r--r--  target/i386/cpu.h                    21
-rw-r--r--  target/i386/hax-all.c                10
-rw-r--r--  target/i386/helper.c                 96
-rw-r--r--  target/i386/kvm.c                    36
-rw-r--r--  target/i386/machine.c                 4
-rw-r--r--  target/i386/smm_helper.c             18
-rw-r--r--  target/i386/translate.c              55
-rw-r--r--  target/m68k/cpu.c                     2
-rw-r--r--  target/m68k/cpu.h                     1
-rw-r--r--  target/m68k/translate.c              11
-rw-r--r--  target/mips/translate.c               4
-rw-r--r--  target/nios2/translate.c              2
-rw-r--r--  target/ppc/cpu.h                      1
-rw-r--r--  target/s390x/cpu.c                    1
-rw-r--r--  target/s390x/cpu.h                    7
-rw-r--r--  target/s390x/cpu_models.c            36
-rw-r--r--  target/s390x/fpu_helper.c            27
-rw-r--r--  target/s390x/helper.c                 7
-rw-r--r--  target/s390x/helper.h                28
-rw-r--r--  target/s390x/insn-data.def           66
-rw-r--r--  target/s390x/machine.c               19
-rw-r--r--  target/s390x/mem_helper.c          1325
-rw-r--r--  target/s390x/misc_helper.c            4
-rw-r--r--  target/s390x/mmu_helper.c             4
-rw-r--r--  target/s390x/translate.c            543
-rw-r--r--  target/sh4/cpu.h                     12
-rw-r--r--  target/sh4/helper.c                  28
-rw-r--r--  target/sh4/translate.c               25
-rw-r--r--  target/tilegx/cpu.c                   1
-rw-r--r--  target/xtensa/cpu.h                   1
-rw-r--r--  target/xtensa/gdbstub.c              13
-rw-r--r--  target/xtensa/xtensa-semi.c          91
46 files changed, 2324 insertions, 822 deletions
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index b4f9798..8186c9d 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -23,7 +23,6 @@
#include "qapi/error.h"
#include "cpu.h"
#include "qemu-common.h"
-#include "migration/vmstate.h"
#include "exec/exec-all.h"
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index df5d695..7c45ae3 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -89,6 +89,9 @@ typedef enum {
updated the PC for the next instruction to be executed. */
EXIT_PC_STALE,
+ /* We are exiting the TB due to page crossing or space constraints. */
+ EXIT_FALLTHRU,
+
/* We are ending the TB with a noreturn function call, e.g. longjmp.
No following code will be executed. */
EXIT_NORETURN,
@@ -1157,6 +1160,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
#ifndef CONFIG_USER_ONLY
/* Privileged PAL code */
if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
+ TCGv tmp;
switch (palcode) {
case 0x01:
/* CFLUSH */
@@ -1182,10 +1186,8 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
offsetof(CPUAlphaState, sysval));
break;
- case 0x35: {
+ case 0x35:
/* SWPIPL */
- TCGv tmp;
-
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
@@ -1197,7 +1199,6 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
- }
case 0x36:
/* RDPS */
@@ -1220,6 +1221,14 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
+ case 0x3E:
+ /* WTINT */
+ tmp = tcg_const_i64(1);
+ tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
+ offsetof(CPUState, halted));
+ tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
+ return gen_excp(ctx, EXCP_HALTED, 0);
+
default:
palcode &= 0x3f;
goto do_call_pal;
@@ -1369,7 +1378,7 @@ static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
tmp = tcg_const_i64(1);
tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
offsetof(CPUState, halted));
- return gen_excp(ctx, EXCP_HLT, 0);
+ return gen_excp(ctx, EXCP_HALTED, 0);
case 252:
/* HALT */
@@ -2978,7 +2987,7 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
|| num_insns >= max_insns
|| singlestep
|| ctx.singlestep_enabled)) {
- ret = EXIT_PC_STALE;
+ ret = EXIT_FALLTHRU;
}
} while (ret == NO_EXIT);
@@ -2990,6 +2999,13 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
case EXIT_GOTO_TB:
case EXIT_NORETURN:
break;
+ case EXIT_FALLTHRU:
+ if (use_goto_tb(&ctx, ctx.pc)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, ctx.pc);
+ tcg_gen_exit_tb((uintptr_t)ctx.tb);
+ }
+ /* FALLTHRU */
case EXIT_PC_STALE:
tcg_gen_movi_i64(cpu_pc, ctx.pc);
/* FALLTHRU */
@@ -3001,7 +3017,7 @@ void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
}
break;
default:
- abort();
+ g_assert_not_reached();
}
gen_tb_end(tb, num_insns);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index c185eb1..28a9141 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -550,6 +550,14 @@ static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ /* M profile implies PMSA. We have to do this here rather than
+ * in realize with the other feature-implication checks because
+ * we look at the PMSA bit to see if we should add some properties.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
+ }
+
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
@@ -593,7 +601,7 @@ static void arm_cpu_post_init(Object *obj)
&error_abort);
}
- if (arm_feature(&cpu->env, ARM_FEATURE_MPU)) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
&error_abort);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -689,7 +697,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
if (arm_feature(env, ARM_FEATURE_V7) &&
!arm_feature(env, ARM_FEATURE_M) &&
- !arm_feature(env, ARM_FEATURE_MPU)) {
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
/* v7VMSA drops support for the old ARMv5 tiny pages, so we
* can use 4K pages.
*/
@@ -750,8 +758,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_pmu) {
- cpu->has_pmu = false;
unset_feature(env, ARM_FEATURE_PMU);
+ cpu->id_aa64dfr0 &= ~0xf00;
}
if (!arm_feature(env, ARM_FEATURE_EL2)) {
@@ -763,11 +771,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->id_pfr1 &= ~0xf000;
}
+ /* MPU can be configured out of a PMSA CPU either by setting has-mpu
+ * to false or by setting pmsav7-dregion to 0.
+ */
if (!cpu->has_mpu) {
- unset_feature(env, ARM_FEATURE_MPU);
+ cpu->pmsav7_dregion = 0;
+ }
+ if (cpu->pmsav7_dregion == 0) {
+ cpu->has_mpu = false;
}
- if (arm_feature(env, ARM_FEATURE_MPU) &&
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7)) {
uint32_t nr = cpu->pmsav7_dregion;
@@ -867,7 +881,7 @@ static void arm946_initfn(Object *obj)
cpu->dtb_compatible = "arm,arm946";
set_feature(&cpu->env, ARM_FEATURE_V5);
- set_feature(&cpu->env, ARM_FEATURE_MPU);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
cpu->midr = 0x41059461;
cpu->ctr = 0x0f004006;
@@ -1068,6 +1082,8 @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
.access = PL1_RW, .type = ARM_CP_CONST },
{ .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST },
+ { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
+ .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
REGINFO_SENTINEL
};
@@ -1079,7 +1095,7 @@ static void cortex_r5_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
- set_feature(&cpu->env, ARM_FEATURE_MPU);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
cpu->midr = 0x411fc153; /* r1p3 */
cpu->id_pfr0 = 0x0131;
cpu->id_pfr1 = 0x001;
@@ -1573,7 +1589,7 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
mp_affinity, ARM64_AFFINITY_INVALID),
- DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_END_OF_LIST()
};
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 048faed..16a1e59 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -418,6 +418,7 @@ typedef struct CPUARMState {
uint32_t dfsr; /* Debug Fault Status Register */
uint32_t mmfar; /* MemManage Fault Address */
uint32_t bfar; /* BusFault Address */
+ unsigned mpu_ctrl; /* MPU_CTRL (some bits kept in sctlr_el[1]) */
int exception;
} v7m;
@@ -703,6 +704,8 @@ struct ARMCPU {
ARMELChangeHook *el_change_hook;
void *el_change_hook_opaque;
+
+ int32_t node_id; /* NUMA node this CPU belongs to */
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
@@ -1168,6 +1171,11 @@ FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)
+/* v7M MPU_CTRL bits */
+FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
+FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
+FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
+
/* If adding a feature bit which corresponds to a Linux ELF
* HWCAP bit, remember to update the feature-bit-to-hwcap
* mapping in linux-user/elfload.c:get_elf_hwcap().
@@ -1181,7 +1189,7 @@ enum arm_features {
ARM_FEATURE_V6K,
ARM_FEATURE_V7,
ARM_FEATURE_THUMB2,
- ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
+ ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */
ARM_FEATURE_VFP3,
ARM_FEATURE_VFP_FP16,
ARM_FEATURE_NEON,
@@ -2039,6 +2047,28 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* for the accesses done as part of a stage 1 page table walk, rather than
* having to walk the stage 2 page table over and over.)
*
+ * R profile CPUs have an MPU, but can use the same set of MMU indexes
+ * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
+ * NS EL2 if we ever model a Cortex-R52).
+ *
+ * M profile CPUs are rather different as they do not have a true MMU.
+ * They have the following different MMU indexes:
+ * User
+ * Privileged
+ * Execution priority negative (this is like privileged, but the
+ * MPU HFNMIENA bit means that it may have different access permission
+ * check results to normal privileged code, so can't share a TLB).
+ *
+ * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
+ * are not quite the same -- different CPU types (most notably M profile
+ * vs A/R profile) would like to use MMU indexes with different semantics,
+ * but since we don't ever need to use all of those in a single CPU we
+ * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
+ * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
+ * the same for any particular CPU.
+ * Variables of type ARMMMUIdx are always full values, and the core
+ * index values are in variables of type 'int'.
+ *
* Our enumeration includes at the end some entries which are not "true"
* mmu_idx values in that they don't have corresponding TLBs and are only
* valid for doing slow path page table walks.
@@ -2047,28 +2077,74 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* of the AT/ATS operations.
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
*/
+#define ARM_MMU_IDX_A 0x10 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x40 /* M profile */
+
+#define ARM_MMU_IDX_TYPE_MASK (~0x7)
+#define ARM_MMU_IDX_COREIDX_MASK 0x7
+
typedef enum ARMMMUIdx {
- ARMMMUIdx_S12NSE0 = 0,
- ARMMMUIdx_S12NSE1 = 1,
- ARMMMUIdx_S1E2 = 2,
- ARMMMUIdx_S1E3 = 3,
- ARMMMUIdx_S1SE0 = 4,
- ARMMMUIdx_S1SE1 = 5,
- ARMMMUIdx_S2NS = 6,
+ ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
/* Indexes below here don't have TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
*/
- ARMMMUIdx_S1NSE0 = 7,
- ARMMMUIdx_S1NSE1 = 8,
+ ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;
+/* Bit macros for the core-mmu-index values for each index,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
+ */
+typedef enum ARMMMUIdxBit {
+ ARMMMUIdxBit_S12NSE0 = 1 << 0,
+ ARMMMUIdxBit_S12NSE1 = 1 << 1,
+ ARMMMUIdxBit_S1E2 = 1 << 2,
+ ARMMMUIdxBit_S1E3 = 1 << 3,
+ ARMMMUIdxBit_S1SE0 = 1 << 4,
+ ARMMMUIdxBit_S1SE1 = 1 << 5,
+ ARMMMUIdxBit_S2NS = 1 << 6,
+ ARMMMUIdxBit_MUser = 1 << 0,
+ ARMMMUIdxBit_MPriv = 1 << 1,
+ ARMMMUIdxBit_MNegPri = 1 << 2,
+} ARMMMUIdxBit;
+
#define MMU_USER_IDX 0
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return mmu_idx | ARM_MMU_IDX_M;
+ } else {
+ return mmu_idx | ARM_MMU_IDX_A;
+ }
+}
+
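A rough sketch of the round trip (illustrative only; "env" is assumed to describe an M-profile CPU):

    ARMMMUIdx idx = ARMMMUIdx_MNegPri;   /* 2 | ARM_MMU_IDX_M == 0x42 */
    int core = arm_to_core_mmu_idx(idx); /* 0x42 & 0x7 == 2 */
    assert(core_to_arm_mmu_idx(env, core) == idx); /* ARM_FEATURE_M is set */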
/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
- assert(mmu_idx < ARMMMUIdx_S2NS);
- return mmu_idx & 3;
+ switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
+ case ARM_MMU_IDX_A:
+ return mmu_idx & 3;
+ case ARM_MMU_IDX_M:
+ return mmu_idx == ARMMMUIdx_MUser ? 0 : 1;
+ default:
+ g_assert_not_reached();
+ }
}
/* Determine the current mmu_idx to use for normal loads/stores */
@@ -2076,8 +2152,22 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv;
+
+ /* Execution priority is negative if FAULTMASK is set or
+ * we're in a HardFault or NMI handler.
+ */
+ if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
+ || env->daif & PSTATE_F) {
+ return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
+ }
+
+ return arm_to_core_mmu_idx(mmu_idx);
+ }
+
if (el < 2 && arm_is_secure_below_el3(env)) {
- return ARMMMUIdx_S1SE0 + el;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
}
return el;
}
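A worked example of the negative-priority path (a sketch, not part of the patch):

    /* While handling NMI, env->v7m.exception == 2, so
     *   cpu_mmu_index(env, false)
     *     -> arm_to_core_mmu_idx(ARMMMUIdx_MNegPri) == 2
     * and NMI/HardFault code gets a TLB separate from ordinary privileged
     * code, since MPU_CTRL.HFNMIENA may disable the MPU just for it.
     */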
@@ -2473,7 +2563,7 @@ static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
- ARMMMUIdx mmu_idx = cpu_mmu_index(env, false);
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
if (is_a64(env)) {
*pc = env->pc;
*flags = ARM_TBFLAG_AARCH64_STATE_MASK;
@@ -2498,7 +2588,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (mmu_idx << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8a3e448..2594faa 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -485,7 +485,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
ARMCPU *cpu = arm_env_get_cpu(env);
- if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
+ if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
&& !extended_addresses_enabled(env)) {
/* For VMSA (when not using the LPAE long descriptor page table
* format) this register includes the ASID, so do a TLB flush.
@@ -571,9 +571,9 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
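Numerically nothing changes here; a sketch of the equivalence for one index (values from cpu.h above):

    /* ARMMMUIdx_S12NSE1     == 1 | ARM_MMU_IDX_A                    (0x11)
     * arm_to_core_mmu_idx() == 0x11 & ARM_MMU_IDX_COREIDX_MASK == 1
     * ARMMMUIdxBit_S12NSE1  == 1 << 1, i.e. the old (1 << ARMMMUIdx_S12NSE1)
     */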
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -582,9 +582,9 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -605,7 +605,7 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -621,7 +621,7 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 40);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -629,7 +629,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -637,7 +637,7 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -646,7 +646,7 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = ENV_GET_CPU(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -656,7 +656,7 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
@@ -2596,9 +2596,9 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Accesses to VTTBR may change the VMID so we must flush the TLB. */
if (raw_read(env, ri) != value) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
raw_write(env, ri, value);
}
}
@@ -2957,12 +2957,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -2974,12 +2974,12 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -2995,18 +2995,18 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
if (arm_feature(env, ARM_FEATURE_EL2)) {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
}
@@ -3017,7 +3017,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3026,7 +3026,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3042,17 +3042,17 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else if (has_el2) {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0) |
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0 |
+ ARMMMUIdxBit_S2NS);
} else {
tlb_flush_by_mmuidx_all_cpus_synced(cs,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3061,7 +3061,7 @@ static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3069,7 +3069,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = ENV_GET_CPU(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3086,12 +3086,12 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_is_secure_below_el3(env)) {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3106,7 +3106,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3120,7 +3120,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3133,12 +3133,12 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (sec) {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1SE1) |
- (1 << ARMMMUIdx_S1SE0));
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
} else {
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S12NSE1) |
- (1 << ARMMMUIdx_S12NSE0));
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
}
}
@@ -3149,7 +3149,7 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E2));
+ ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3159,7 +3159,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = sextract64(value << 12, 0, 56);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S1E3));
+ ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3181,7 +3181,7 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
- tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3197,7 +3197,7 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
pageaddr = sextract64(value << 12, 0, 48);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- (1 << ARMMMUIdx_S2NS));
+ ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3258,6 +3258,11 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
+ if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
+ /* M bit is RAZ/WI for PMSA with no MPU implemented */
+ value &= ~SCTLR_M;
+ }
+
raw_write(env, ri, value);
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
@@ -4615,7 +4620,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V7MP) &&
- !arm_feature(env, ARM_FEATURE_MPU)) {
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
define_arm_cp_regs(cpu, v7mp_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V7)) {
@@ -4969,7 +4974,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
}
- if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
if (arm_feature(env, ARM_FEATURE_V6)) {
/* PMSAv6 not implemented */
assert(arm_feature(env, ARM_FEATURE_V7));
@@ -5131,7 +5136,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
}
define_arm_cp_regs(cpu, id_cp_reginfo);
- if (!arm_feature(env, ARM_FEATURE_MPU)) {
+ if (!arm_feature(env, ARM_FEATURE_PMSA)) {
define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
@@ -6337,10 +6342,49 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* TODO: if we implemented the MPU registers, this is where we
- * should set the MMFAR, etc from exception.fsr and exception.vaddress.
+ /* Note that for M profile we don't have a guest facing FSR, but
+ * the env->exception.fsr will be populated by the code that
+ * raises the fault, in the A profile short-descriptor format.
*/
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ switch (env->exception.fsr & 0xf) {
+ case 0x8: /* External Abort */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr |= R_V7M_CFSR_IBUSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
+ env->v7m.bfar);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
+ break;
+ default:
+ /* All other FSR values are either MPU faults or "can't happen
+ * for M profile" cases.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr |=
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
+ env->v7m.mmfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
+ env->v7m.mmfar);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ break;
+ }
break;
case EXCP_BKPT:
if (semihosting_enabled()) {
@@ -6992,6 +7036,9 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1SE1:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ case ARMMMUIdx_MUser:
return 1;
default:
g_assert_not_reached();
@@ -7008,6 +7055,9 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1NSE1:
case ARMMMUIdx_S1E2:
case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ case ARMMMUIdx_MUser:
return false;
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
@@ -7028,6 +7078,24 @@ static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline bool regime_translation_disabled(CPUARMState *env,
ARMMMUIdx mmu_idx)
{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ switch (env->v7m.mpu_ctrl &
+ (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
+ case R_V7M_MPU_CTRL_ENABLE_MASK:
+ /* Enabled, but not for HardFault and NMI */
+ return mmu_idx == ARMMMUIdx_MNegPri;
+ case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
+ /* Enabled for all cases */
+ return false;
+ case 0:
+ default:
+ /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
+ * we warned about that in armv7m_nvic.c when the guest set it.
+ */
+ return true;
+ }
+ }
+
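    /* (Sketch, not part of the patch.) The decode above in table form:
     *   ENABLE HFNMIENA -> translation disabled?
     *      0      x        yes, for every mmu_idx
     *      1      0        only for ARMMMUIdx_MNegPri (HardFault/NMI)
     *      1      1        no
     */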
if (mmu_idx == ARMMMUIdx_S2NS) {
return (env->cp15.hcr_el2 & HCR_VM) == 0;
}
@@ -7049,6 +7117,17 @@ static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
+/* Convert a possible stage1+2 MMU index into the appropriate
+ * stage 1 MMU index
+ */
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
+ }
+ return mmu_idx;
+}
+
/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
@@ -7056,11 +7135,9 @@ uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
@@ -7079,11 +7156,9 @@ uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
uint32_t el;
/* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
- * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
- */
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
tcr = regime_tcr(env, mmu_idx);
el = regime_el(env, mmu_idx);
@@ -7129,9 +7204,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env,
* on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- mmu_idx += ARMMMUIdx_S1NSE0;
- }
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
return regime_using_lpae_format(env, mmu_idx);
}
@@ -7141,6 +7214,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_MUser:
return true;
default:
return false;
@@ -8114,18 +8188,60 @@ static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
ARMMMUIdx mmu_idx,
int32_t address, int *prot)
{
- *prot = PAGE_READ | PAGE_WRITE;
- switch (address) {
- case 0xF0000000 ... 0xFFFFFFFF:
- if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ *prot = PAGE_READ | PAGE_WRITE;
+ switch (address) {
+ case 0xF0000000 ... 0xFFFFFFFF:
+ if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
+ /* hivecs execing is ok */
+ *prot |= PAGE_EXEC;
+ }
+ break;
+ case 0x00000000 ... 0x7FFFFFFF:
*prot |= PAGE_EXEC;
+ break;
+ }
+ } else {
+ /* Default system address map for M profile cores.
+ * The architecture specifies which regions are execute-never;
+ * at the MPU level no other checks are defined.
+ */
+ switch (address) {
+ case 0x00000000 ... 0x1fffffff: /* ROM */
+ case 0x20000000 ... 0x3fffffff: /* SRAM */
+ case 0x60000000 ... 0x7fffffff: /* RAM */
+ case 0x80000000 ... 0x9fffffff: /* RAM */
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+ case 0x40000000 ... 0x5fffffff: /* Peripheral */
+ case 0xa0000000 ... 0xbfffffff: /* Device */
+ case 0xc0000000 ... 0xdfffffff: /* Device */
+ case 0xe0000000 ... 0xffffffff: /* System */
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ default:
+ g_assert_not_reached();
}
- break;
- case 0x00000000 ... 0x7FFFFFFF:
- *prot |= PAGE_EXEC;
- break;
+ }
+}
+
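For instance (illustrative addresses): with the MPU off, an M-profile core gets

    /* 0x20001000 (SRAM)       -> PAGE_READ | PAGE_WRITE | PAGE_EXEC
     * 0x40001000 (Peripheral) -> PAGE_READ | PAGE_WRITE   (execute-never)
     */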
+static bool pmsav7_use_background_region(ARMCPU *cpu,
+ ARMMMUIdx mmu_idx, bool is_user)
+{
+ /* Return true if we should use the default memory map as a
+ * "background" region if there are no hits against any MPU regions.
+ */
+ CPUARMState *env = &cpu->env;
+
+ if (is_user) {
+ return false;
}
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ } else {
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
+ }
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
@@ -8154,16 +8270,18 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
if (!rsize) {
- qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field can not be 0");
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRSR[%d]: Rsize field cannot be 0\n", n);
continue;
}
rsize++;
rmask = (1ull << rsize) - 1;
if (base & rmask) {
- qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
- "to DRSR region size, mask = %" PRIx32,
- base, rmask);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRBAR[%d]: 0x%" PRIx32 " misaligned "
+ "to DRSR region size, mask = 0x%" PRIx32 "\n",
+ n, base, rmask);
continue;
}
@@ -8200,9 +8318,10 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
}
if (rsize < TARGET_PAGE_BITS) {
- qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region"
+ qemu_log_mask(LOG_UNIMP,
+ "DRSR[%d]: No support for MPU (sub)region "
"alignment of %" PRIu32 " bits. Minimum is %d\n",
- rsize, TARGET_PAGE_BITS);
+ n, rsize, TARGET_PAGE_BITS);
continue;
}
if (srdis) {
@@ -8212,8 +8331,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
if (n == -1) { /* no hits */
- if (cpu->pmsav7_dregion &&
- (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
+ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
/* background fault */
*fsr = 0;
return true;
@@ -8237,8 +8355,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "Bad value for AP bits in DRACR %"
- PRIx32 "\n", ap);
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
}
} else { /* Priv. mode AP bits decoding */
switch (ap) {
@@ -8255,8 +8373,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
- "Bad value for AP bits in DRACR %"
- PRIx32 "\n", ap);
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
}
}
@@ -8385,7 +8503,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
int ret;
ret = get_phys_addr(env, address, access_type,
- mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+ stage_1_mmu_idx(mmu_idx), &ipa, attrs,
prot, page_size, fsr, fi);
/* If S1 fails or S2 is disabled, return early. */
@@ -8406,7 +8524,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/*
* For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
*/
- mmu_idx += ARMMMUIdx_S1NSE0;
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
}
}
@@ -8432,11 +8550,23 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* pmsav7 has special handling for when MPU is disabled so call it before
* the common MMU/MPU disabled check below.
*/
- if (arm_feature(env, ARM_FEATURE_MPU) &&
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7)) {
+ bool ret;
*page_size = TARGET_PAGE_SIZE;
- return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
+ " mmu_idx %u -> %s (prot %c%c%c)\n",
+ access_type == 1 ? "reading" :
+ (access_type == 2 ? "writing" : "execute"),
+ (uint32_t)address, mmu_idx,
+ ret ? "Miss" : "Hit",
+ *prot & PAGE_READ ? 'r' : '-',
+ *prot & PAGE_WRITE ? 'w' : '-',
+ *prot & PAGE_EXEC ? 'x' : '-');
+
+ return ret;
}
if (regime_translation_disabled(env, mmu_idx)) {
@@ -8447,7 +8577,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
return 0;
}
- if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
/* Pre-v7 MPU */
*page_size = TARGET_PAGE_SIZE;
return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
@@ -8482,7 +8612,8 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
int ret;
MemTxAttrs attrs = {};
- ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
+ ret = get_phys_addr(env, address, access_type,
+ core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
&attrs, &prot, &page_size, fsr, fi);
if (!ret) {
/* Map a single [sub]page. */
@@ -8507,10 +8638,11 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
bool ret;
uint32_t fsr;
ARMMMUFaultInfo fi = {};
+ ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
*attrs = (MemTxAttrs) {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
+ ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
attrs, &prot, &page_size, &fsr, &fi);
if (ret) {
diff --git a/target/arm/machine.c b/target/arm/machine.c
index d8094a8..1a40469 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -99,8 +99,8 @@ static bool m_needed(void *opaque)
static const VMStateDescription vmstate_m = {
.name = "cpu/m",
- .version_id = 3,
- .minimum_version_id = 3,
+ .version_id = 4,
+ .minimum_version_id = 4,
.needed = m_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
@@ -112,6 +112,7 @@ static const VMStateDescription vmstate_m = {
VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
VMSTATE_UINT32(env.v7m.mmfar, ARMCPU),
VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
+ VMSTATE_UINT32(env.v7m.mpu_ctrl, ARMCPU),
VMSTATE_INT32(env.v7m.exception, ARMCPU),
VMSTATE_END_OF_LIST()
}
@@ -142,7 +143,7 @@ static bool pmsav7_needed(void *opaque)
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
- return arm_feature(env, ARM_FEATURE_MPU) &&
+ return arm_feature(env, ARM_FEATURE_PMSA) &&
arm_feature(env, ARM_FEATURE_V7);
}
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 156b825..2a85666 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -194,6 +194,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
int target_el;
bool same_el;
uint32_t syn;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
if (retaddr) {
/* now we have a real cpu fault */
@@ -208,7 +209,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
/* the DFSR for an alignment fault depends on whether we're using
* the LPAE long descriptor format, or the short descriptor format
*/
- if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
+ if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
env->exception.fsr = (1 << 9) | 0x21;
} else {
env->exception.fsr = 0x1;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 24de30d..860e279 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -101,21 +101,27 @@ void a64_translate_init(void)
offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
-static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+static inline int get_a64_user_mem_index(DisasContext *s)
{
- /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+ /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
* if EL1, access as if EL0; otherwise access at current EL
*/
+ ARMMMUIdx useridx;
+
switch (s->mmu_idx) {
case ARMMMUIdx_S12NSE1:
- return ARMMMUIdx_S12NSE0;
+ useridx = ARMMMUIdx_S12NSE0;
+ break;
case ARMMMUIdx_S1SE1:
- return ARMMMUIdx_S1SE0;
+ useridx = ARMMMUIdx_S1SE0;
+ break;
case ARMMMUIdx_S2NS:
g_assert_not_reached();
default:
- return s->mmu_idx;
+ useridx = s->mmu_idx;
+ break;
}
+ return arm_to_core_mmu_idx(useridx);
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
@@ -373,7 +379,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
} else if (s->singlestep_enabled) {
gen_exception_internal(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_pc);
s->is_jmp = DISAS_TB_JUMP;
}
}
@@ -11212,7 +11218,7 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
- dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
dc->tbi0 = ARM_TBFLAG_TBI0(tb->flags);
dc->tbi1 = ARM_TBFLAG_TBI1(tb->flags);
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
@@ -11361,8 +11367,7 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
gen_a64_set_pc_im(dc->pc);
/* fall through */
case DISAS_JUMP:
- /* indicate that the hash table must be used to find the next TB */
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_pc);
break;
case DISAS_TB_JUMP:
case DISAS_EXC:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 0b5a0bc..0862f9e 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -145,9 +145,9 @@ static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
disas_set_insn_syndrome(s, syn);
}
-static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+static inline int get_a32_user_mem_index(DisasContext *s)
{
- /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
* insns:
* if PL2, UNPREDICTABLE (we choose to implement as if PL0)
* otherwise, access as if at PL0.
@@ -156,11 +156,15 @@ static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_S12NSE0:
case ARMMMUIdx_S12NSE1:
- return ARMMMUIdx_S12NSE0;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1SE1:
- return ARMMMUIdx_S1SE0;
+ return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
case ARMMMUIdx_S2NS:
default:
g_assert_not_reached();
@@ -1178,7 +1182,7 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception_internal(excp);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXC;
}
static void gen_exception_insn(DisasContext *s, int offset, int excp,
@@ -1187,14 +1191,14 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception(excp, syn, target_el);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXC;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXIT;
}
static inline void gen_hlt(DisasContext *s, int imm)
@@ -4146,7 +4150,15 @@ static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
#endif
}
-static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static void gen_goto_ptr(void)
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_extu_i32_tl(addr, cpu_R[15]);
+ tcg_gen_lookup_and_goto_ptr(addr);
+ tcg_temp_free(addr);
+}
+
+static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
@@ -4154,7 +4166,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(s, dest);
- tcg_gen_exit_tb(0);
+ gen_goto_ptr();
}
}
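Roughly, the two exit paths trade off as follows (a sketch of the mechanism, not part of the patch):

    /* use_goto_tb(s, dest): direct chaining -- a patchable jump to a known
     * TB, only allowed when dest is on the same guest page as this TB.
     * gen_goto_ptr(): tcg_gen_lookup_and_goto_ptr() looks dest up in the
     * TB hash table at run time and jumps straight to its code if found,
     * instead of returning to the main loop via tcg_gen_exit_tb(0).
     */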
@@ -11816,7 +11828,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
- dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
@@ -12087,11 +12099,14 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
gen_set_pc_im(dc, dc->pc);
/* fall through */
case DISAS_JUMP:
+ gen_goto_ptr();
+ break;
default:
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
case DISAS_TB_JUMP:
+ case DISAS_EXC:
/* nothing more to generate */
break;
case DISAS_WFI:
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 629dab9..15d383d 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -88,7 +88,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
static inline int get_mem_index(DisasContext *s)
{
- return s->mmu_idx;
+ return arm_to_core_mmu_idx(s->mmu_idx);
}
/* Function used to determine the target exception EL when otherwise not known
@@ -139,6 +139,10 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
* custom end-of-TB code)
*/
#define DISAS_BX_EXCRET 11
+/* For instructions which want an immediate exit to the main loop,
+ * as opposed to attempting to use lookup_and_goto_ptr.
+ */
+#define DISAS_EXIT 12
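    /* Sketch of the distinction (not part of the patch): DISAS_JUMP now ends
     * the TB with tcg_gen_lookup_and_goto_ptr(), staying inside generated
     * code when the target TB already exists; DISAS_EXIT forces
     * tcg_gen_exit_tb(0) so the main loop re-examines CPU state, e.g. after
     * a system-register write that can change how code is translated.
     */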
#ifdef TARGET_AARCH64
void a64_translate_init(void);
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 1d791d0..30299e9 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -22,7 +22,6 @@
#include "qapi/error.h"
#include "cpu.h"
#include "qemu-common.h"
-#include "migration/vmstate.h"
#include "exec/exec-all.h"
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 9e8c233..e10abc5 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -517,7 +517,7 @@ static void gen_goto_tb(DisasContext *ctx, int which,
if (ctx->singlestep_enabled) {
gen_excp_1(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
}
}
}
@@ -1510,7 +1510,7 @@ static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
} else if (is_n && use_nullify_skip(ctx)) {
/* The (conditional) branch, B, nullifies the next insn, N,
and we're allowed to skip execution N (no single-step or
- tracepoint in effect). Since the exit_tb that we must use
+ tracepoint in effect). Since the goto_ptr that we must use
for the indirect branch consumes no special resources, we
can (conditionally) skip B and continue execution. */
/* The use_nullify_skip test implies we have a known control path. */
@@ -1527,7 +1527,7 @@ static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
if (link != 0) {
tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
return nullify_end(ctx, NO_EXIT);
} else {
cond_prep(&ctx->null_cond);
@@ -3885,7 +3885,7 @@ void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
if (ctx.singlestep_enabled) {
gen_excp_1(EXCP_DEBUG);
} else {
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_iaoq_f);
}
break;
default:
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index 826aee5..647cff2 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -272,25 +272,27 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ int32_t a20_mask;
if (!cpu_paging_enabled(cs)) {
/* paging is disabled */
return;
}
+ a20_mask = x86_get_a20_mask(env);
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
hwaddr pml5e_addr;
- pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
- walk_pml5e(list, cs->as, pml5e_addr, env->a20_mask);
+ pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
} else {
hwaddr pml4e_addr;
- pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
- walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask,
+ pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
0xffffULL << 48);
}
} else
@@ -298,16 +300,16 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
{
hwaddr pdpe_addr;
- pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
- walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
+ pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
+ walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
}
} else {
hwaddr pde_addr;
bool pse;
- pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
+ pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
pse = !!(env->cr[4] & CR4_PSE_MASK);
- walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
+ walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
}
}
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index a41d595..b2b1d20 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3239,7 +3239,7 @@ static void x86_cpu_machine_done(Notifier *n, void *unused)
cpu->smram = g_new(MemoryRegion, 1);
memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
smram, 0, 1ull << 32);
- memory_region_set_enabled(cpu->smram, false);
+ memory_region_set_enabled(cpu->smram, true);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
}
}
@@ -3619,7 +3619,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
- AddressSpace *newas = g_new(AddressSpace, 1);
+ AddressSpace *as_normal = address_space_init_shareable(cs->memory,
+ "cpu-memory");
+ AddressSpace *as_smm = g_new(AddressSpace, 1);
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
@@ -3635,9 +3637,11 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
- address_space_init(newas, cpu->cpu_as_root, "CPU");
- cs->num_ases = 1;
- cpu_address_space_init(cs, newas, 0);
+ address_space_init(as_smm, cpu->cpu_as_root, "CPU");
+
+ cs->num_ases = 2;
+ cpu_address_space_init(cs, as_normal, 0);
+ cpu_address_space_init(cs, as_smm, 1);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
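A sketch of the resulting layout (names as in the code above):

    /* AS 0 "cpu-memory": plain system memory, used when attrs.secure == 0
     * AS 1 "CPU": cpu_as_root = system memory + SMRAM overlay, used in SMM
     * x86_asidx_from_attrs() selects between them via attrs.secure.
     */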
@@ -3986,7 +3990,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
- DEFINE_PROP_INT32("node-id", CPUState, numa_node, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
@@ -4053,6 +4057,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
+ cc->asidx_from_attrs = x86_asidx_from_attrs;
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->write_elf64_note = x86_cpu_write_elf64_note;
@@ -4063,11 +4068,11 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
#endif
cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
- cc->gdb_core_xml_file = "i386-64bit-core.xml";
- cc->gdb_num_core_regs = 40;
+ cc->gdb_core_xml_file = "i386-64bit.xml";
+ cc->gdb_num_core_regs = 57;
#else
- cc->gdb_core_xml_file = "i386-32bit-core.xml";
- cc->gdb_num_core_regs = 32;
+ cc->gdb_core_xml_file = "i386-32bit.xml";
+ cc->gdb_num_core_regs = 41;
#endif
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c4602ca..de0551f 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1275,6 +1275,7 @@ struct X86CPU {
struct kvm_msrs *kvm_msr_buf;
+ int32_t node_id; /* NUMA node this CPU belongs to */
int32_t socket_id;
int32_t core_id;
int32_t thread_id;
@@ -1450,6 +1451,16 @@ int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
+static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return !!attrs.secure;
+}
+
+static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
+}
+
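A sketch of the intended use (the x86_ld*_phys helpers below are the real consumers):

    /* MemTxAttrs attrs = cpu_get_mem_attrs(env);      // .secure set in SMM
     * AddressSpace *as = cpu_addressspace(cs, attrs); // AS 1 = SMM view
     * uint8_t v = address_space_ldub(as, addr, attrs, NULL);
     */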
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
@@ -1624,6 +1635,15 @@ static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}
+static inline int32_t x86_get_a20_mask(CPUX86State *env)
+{
+ if (env->hflags & HF_SMM_MASK) {
+ return -1;
+ } else {
+ return env->a20_mask;
+ }
+}
+
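A worked example (illustrative): with the A20 line masked, bit 20 of every physical address is forced to zero outside SMM:

    /* env->a20_mask == ~(1 << 20) == 0xffefffff when A20 is disabled, so
     * 0x00100000 & x86_get_a20_mask(env) == 0x00000000 outside SMM;
     * inside SMM the helper returns -1 and the address is unchanged.
     */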
/* fpu_helper.c */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);
@@ -1643,7 +1663,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
-void cpu_smm_update(X86CPU *cpu);
/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 7346931..097db5c 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -635,6 +635,16 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
+static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+ cpu->hax_vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
int hax_smp_cpu_exec(CPUState *cpu)
{
CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
diff --git a/target/i386/helper.c b/target/i386/helper.c
index ee7eff2..ef05059 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -724,6 +724,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
+ int32_t a20_mask;
target_ulong pde_addr, pte_addr;
int error_code = 0;
int is_dirty, prot, page_size, is_write, is_user;
@@ -739,6 +740,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
#endif
is_write = is_write1 & 1;
+ a20_mask = x86_get_a20_mask(env);
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
#ifdef TARGET_X86_64
@@ -777,7 +779,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (la57) {
pml5e_addr = ((env->cr[3] & ~0xfff) +
- (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
goto do_fault;
@@ -796,7 +798,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
- (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault;
@@ -810,7 +812,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
ptep &= pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
@@ -835,7 +837,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
{
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
@@ -848,7 +850,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
@@ -870,7 +872,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
x86_stl_phys_notdirty(cs, pde_addr, pde);
}
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
- env->a20_mask;
+ a20_mask;
pte = x86_ldq_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
@@ -886,7 +888,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
- env->a20_mask;
+ a20_mask;
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
goto do_fault;
@@ -913,7 +915,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
- env->a20_mask;
+ a20_mask;
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
goto do_fault;
@@ -992,7 +994,7 @@ do_check_protect_pse36:
}
do_mapping:
- pte = pte & env->a20_mask;
+ pte = pte & a20_mask;
/* align to page_size */
pte &= PG_ADDRESS_MASK & ~(page_size - 1);
@@ -1039,11 +1041,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
CPUX86State *env = &cpu->env;
target_ulong pde_addr, pte_addr;
uint64_t pte;
+ int32_t a20_mask;
uint32_t page_offset;
int page_size;
+ a20_mask = x86_get_a20_mask(env);
if (!(env->cr[0] & CR0_PG_MASK)) {
- pte = addr & env->a20_mask;
+ pte = addr & a20_mask;
page_size = 4096;
} else if (env->cr[4] & CR4_PAE_MASK) {
target_ulong pdpe_addr;
@@ -1064,7 +1068,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (la57) {
pml5e_addr = ((env->cr[3] & ~0xfff) +
- (((addr >> 48) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
pml5e = x86_ldq_phys(cs, pml5e_addr);
if (!(pml5e & PG_PRESENT_MASK)) {
return -1;
@@ -1074,13 +1078,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
- (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
pml4e = x86_ldq_phys(cs, pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
return -1;
}
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
- (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
return -1;
@@ -1095,14 +1099,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
#endif
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
- env->a20_mask;
+ a20_mask;
pdpe = x86_ldq_phys(cs, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
pde_addr = ((pdpe & PG_ADDRESS_MASK) +
- (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
pde = x86_ldq_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
return -1;
@@ -1114,7 +1118,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
} else {
/* 4 KB page */
pte_addr = ((pde & PG_ADDRESS_MASK) +
- (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
+ (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
page_size = 4096;
pte = x86_ldq_phys(cs, pte_addr);
}
@@ -1125,7 +1129,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
uint32_t pde;
/* page directory entry */
- pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
pde = x86_ldl_phys(cs, pde_addr);
if (!(pde & PG_PRESENT_MASK))
return -1;
@@ -1134,14 +1138,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
page_size = 4096 * 1024;
} else {
/* page directory entry */
- pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
pte = x86_ldl_phys(cs, pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
return -1;
}
page_size = 4096;
}
- pte = pte & env->a20_mask;
+ pte = pte & a20_mask;
}
#ifdef TARGET_X86_64
@@ -1399,89 +1403,89 @@ uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldub(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldub(as, addr, attrs, NULL);
}
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_lduw(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_lduw(as, addr, attrs, NULL);
}
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldl(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldl(as, addr, attrs, NULL);
}
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- return address_space_ldq(cs->as, addr,
- cpu_get_mem_attrs(env),
- NULL);
+ return address_space_ldq(as, addr, attrs, NULL);
}
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stb(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stb(as, addr, val, attrs, NULL);
}
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stl_notdirty(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stl_notdirty(as, addr, val, attrs, NULL);
}
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stw(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stw(as, addr, val, attrs, NULL);
}
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stl(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stl(as, addr, val, attrs, NULL);
}
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
- address_space_stq(cs->as, addr, val,
- cpu_get_mem_attrs(env),
- NULL);
+ address_space_stq(as, addr, val, attrs, NULL);
}
#endif
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 49b6115..ee36502 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -43,6 +43,7 @@
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"
@@ -1254,7 +1255,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
}
- if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
+ if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
+ object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
+ pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
smram_machine_done.notify = register_smram_listener;
qemu_add_machine_init_done_notifier(&smram_machine_done);
}
@@ -1300,18 +1303,14 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
- if (rhs->unusable) {
- lhs->flags = 0;
- } else {
- lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
- (rhs->present * DESC_P_MASK) |
- (rhs->dpl << DESC_DPL_SHIFT) |
- (rhs->db << DESC_B_SHIFT) |
- (rhs->s * DESC_S_MASK) |
- (rhs->l << DESC_L_SHIFT) |
- (rhs->g * DESC_G_MASK) |
- (rhs->avl * DESC_AVL_MASK);
- }
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
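The get_seg() rewrite above preserves the descriptor attribute bits of a segment that KVM marks unusable instead of zeroing the whole flags word; only the present bit is dropped, via (rhs->present && !rhs->unusable). A before/after sketch:

    /* rhs = { .type = 3, .dpl = 3, .present = 1, .unusable = 1 }
     * old code:  lhs->flags == 0                       (all attributes lost)
     * new code:  lhs->flags == (3 << DESC_TYPE_SHIFT)
     *                        | (3 << DESC_DPL_SHIFT)   (only the P bit clear) */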
@@ -3510,12 +3509,17 @@ static void kvm_update_msi_routes_all(void *private, bool global,
int cnt = 0;
MSIRouteEntry *entry;
MSIMessage msg;
+ PCIDevice *dev;
+
/* TODO: explicit route update */
QLIST_FOREACH(entry, &msi_route_list, list) {
cnt++;
- msg = pci_get_msi_message(entry->dev, entry->vector);
- kvm_irqchip_update_msi_route(kvm_state, entry->virq,
- msg, entry->dev);
+ dev = entry->dev;
+ if (!msix_enabled(dev) && !msi_enabled(dev)) {
+ continue;
+ }
+ msg = pci_get_msi_message(dev, entry->vector);
+ kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
}
kvm_irqchip_commit_routes(kvm_state);
trace_kvm_x86_update_msi_routes(cnt);
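The added guard skips cached routes whose device currently has neither MSI nor MSI-X enabled, presumably because pci_get_msi_message() is only well-defined while one of those capabilities is active. A hypothetical predicate capturing the rule:

    /* Hypothetical helper, for illustration only. */
    static bool msi_route_refreshable(PCIDevice *dev)
    {
        return msi_enabled(dev) || msix_enabled(dev);
    }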
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 3cb2729..8c7a822 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -274,10 +274,6 @@ static int cpu_post_load(void *opaque, int version_id)
cpu_x86_update_dr7(env, dr7);
}
tlb_flush(cs);
-
- if (tcg_enabled()) {
- cpu_smm_update(cpu);
- }
return 0;
}
diff --git a/target/i386/smm_helper.c b/target/i386/smm_helper.c
index f051a77..90621e5 100644
--- a/target/i386/smm_helper.c
+++ b/target/i386/smm_helper.c
@@ -43,19 +43,6 @@ void helper_rsm(CPUX86State *env)
#define SMM_REVISION_ID 0x00020000
#endif
-/* Called with iothread lock taken */
-void cpu_smm_update(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
- bool smm_enabled = (env->hflags & HF_SMM_MASK);
-
- g_assert(qemu_mutex_iothread_locked());
-
- if (cpu->smram) {
- memory_region_set_enabled(cpu->smram, smm_enabled);
- }
-}
-
void do_smm_enter(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -73,7 +60,6 @@ void do_smm_enter(X86CPU *cpu)
} else {
env->hflags2 |= HF2_NMI_MASK;
}
- cpu_smm_update(cpu);
sm_state = env->smbase + 0x8000;
@@ -338,10 +324,6 @@ void helper_rsm(CPUX86State *env)
env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
env->hflags &= ~HF_SMM_MASK;
- qemu_mutex_lock_iothread();
- cpu_smm_update(cpu);
- qemu_mutex_unlock_iothread();
-
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 1d1372f..ed3b896 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -141,6 +141,7 @@ typedef struct DisasContext {
} DisasContext;
static void gen_eob(DisasContext *s);
+static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
@@ -2153,9 +2154,9 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
gen_jmp_im(eip);
tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
- /* jump to another page: currently not optimized */
+ /* jump to another page */
gen_jmp_im(eip);
- gen_eob(s);
+ gen_jr(s, cpu_tmp0);
}
}
@@ -2509,7 +2510,8 @@ static void gen_bnd_jmp(DisasContext *s)
If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
S->TF. This is used by the syscall/sysret insns. */
-static void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
+static void
+do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, TCGv jr)
{
gen_update_cc_op(s);
@@ -2530,12 +2532,27 @@ static void gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
tcg_gen_exit_tb(0);
} else if (s->tf) {
gen_helper_single_step(cpu_env);
+ } else if (!TCGV_IS_UNUSED(jr)) {
+ TCGv vaddr = tcg_temp_new();
+
+ tcg_gen_add_tl(vaddr, jr, cpu_seg_base[R_CS]);
+ tcg_gen_lookup_and_goto_ptr(vaddr);
+ tcg_temp_free(vaddr);
} else {
tcg_gen_exit_tb(0);
}
s->is_jmp = DISAS_TB_JUMP;
}
+static inline void
+gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
+{
+ TCGv unused;
+
+ TCGV_UNUSED(unused);
+ do_gen_eob_worker(s, inhibit, recheck_tf, unused);
+}
+
/* End of block.
If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
@@ -2549,6 +2566,12 @@ static void gen_eob(DisasContext *s)
gen_eob_worker(s, false, false);
}
+/* Jump to register */
+static void gen_jr(DisasContext *s, TCGv dest)
+{
+ do_gen_eob_worker(s, false, false, dest);
+}
+
/* generate a jump to eip. No segment change must happen before as a
direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
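gen_jr() above ends the TB like gen_eob(), but instead of unconditionally exiting to the main loop it emits tcg_gen_lookup_and_goto_ptr() on the linear target address (EIP plus the CS base, as computed in do_gen_eob_worker). At run time that amounts to roughly the following, with hypothetical helper names; a conceptual sketch, not QEMU's actual lookup code:

    /* Conceptual run-time effect of the goto-ptr path (sketch). */
    static void *jr_target_sketch(CPUX86State *env, target_ulong eip)
    {
        target_ulong linear = eip + env->segs[R_CS].base;
        TranslationBlock *tb = tb_lookup_sketch(env, linear);  /* hypothetical */

        /* Hit: jump straight into the next TB's generated code.
         * Miss: fall back to the main loop, as tcg_gen_exit_tb(0) would. */
        return tb ? tb->tc_ptr : epilogue_sketch();            /* hypothetical */
    }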
@@ -4973,7 +4996,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 3: /* lcall Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
@@ -4991,7 +5014,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_const_i32(dflag - 1),
tcg_const_i32(s->pc - s->cs_base));
}
- gen_eob(s);
+ tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, cpu_tmp4);
break;
case 4: /* jmp Ev */
if (dflag == MO_16) {
@@ -4999,7 +5023,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 5: /* ljmp Ev */
gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
@@ -5014,7 +5038,8 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_op_movl_seg_T0_vm(R_CS);
gen_op_jmp_v(cpu_T1);
}
- gen_eob(s);
+ tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, cpu_tmp4);
break;
case 6: /* push Ev */
gen_push_v(s, cpu_T0);
@@ -6394,7 +6419,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 0xc3: /* ret */
ot = gen_pop_T0(s);
@@ -6402,7 +6427,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
gen_bnd_jmp(s);
- gen_eob(s);
+ gen_jr(s, cpu_T0);
break;
case 0xca: /* lret im */
val = cpu_ldsw_code(env, s->pc);
@@ -7914,14 +7939,26 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_op_mov_v_reg(ot, cpu_T0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T0);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(ot, rm, cpu_T0);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
}
break;
default:
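The CRn hunk above brackets the helper calls with gen_io_start()/gen_io_end() when CF_USE_ICOUNT is set, since reading or writing a control register may touch timer- or interrupt-visible state (CR8 maps to the APIC TPR, for instance), and icount mode requires such accesses to be accounted deterministically. The generic shape of the pattern, as a sketch:

    /* Generic icount bracketing for a side-effecting helper (sketch);
     * gen_helper_side_effect is a hypothetical stand-in. */
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_side_effect(cpu_env);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }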
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index fa10b6e..f068922 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -130,6 +130,7 @@ static void m68020_cpu_initfn(Object *obj)
m68k_set_feature(env, M68K_FEATURE_FPU);
m68k_set_feature(env, M68K_FEATURE_CAS);
m68k_set_feature(env, M68K_FEATURE_BKPT);
+ m68k_set_feature(env, M68K_FEATURE_RTD);
}
#define m68030_cpu_initfn m68020_cpu_initfn
#define m68040_cpu_initfn m68020_cpu_initfn
@@ -151,6 +152,7 @@ static void m68060_cpu_initfn(Object *obj)
m68k_set_feature(env, M68K_FEATURE_FPU);
m68k_set_feature(env, M68K_FEATURE_CAS);
m68k_set_feature(env, M68K_FEATURE_BKPT);
+ m68k_set_feature(env, M68K_FEATURE_RTD);
}
static void m5208_cpu_initfn(Object *obj)
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 8095822..384ec5d 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -251,6 +251,7 @@ enum m68k_features {
M68K_FEATURE_FPU,
M68K_FEATURE_CAS,
M68K_FEATURE_BKPT,
+ M68K_FEATURE_RTD,
};
static inline int m68k_feature(CPUM68KState *env, int feature)
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 9f60fbc..ad4d4ef 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -2483,6 +2483,16 @@ DISAS_INSN(nop)
{
}
+DISAS_INSN(rtd)
+{
+ TCGv tmp;
+ int16_t offset = read_im16(env, s);
+
+ tmp = gen_load(s, OS_LONG, QREG_SP, 0);
+ tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
+ gen_jmp(s, tmp);
+}
+
DISAS_INSN(rts)
{
TCGv tmp;
@@ -4904,6 +4914,7 @@ void register_m68k_insns (CPUM68KState *env)
BASE(nop, 4e71, ffff);
BASE(stop, 4e72, ffff);
BASE(rte, 4e73, ffff);
+ INSN(rtd, 4e74, ffff, RTD);
BASE(rts, 4e75, ffff);
INSN(movec, 4e7b, ffff, CF_ISA_A);
BASE(jump, 4e80, ffc0);
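For reference, the rtd implementation above follows the 680x0 RTD #d16 semantics: pop the 32-bit return address, then release d16 further bytes of stack (the displacement read by read_im16() is sign-extended). As plain C, a sketch:

    /* RTD #d16 as emitted by DISAS_INSN(rtd) above (sketch). */
    static uint32_t rtd_sketch(uint32_t *sp, int16_t d16,
                               uint32_t (*load32)(uint32_t addr))
    {
        uint32_t pc = load32(*sp);   /* pop the return address */
        *sp += 4 + d16;              /* release the caller's argument area */
        return pc;
    }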
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 3022f34..559f8fe 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -4233,7 +4233,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
save_cpu_state(ctx, 0);
gen_helper_raise_exception_debug(cpu_env);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_PC);
}
}
@@ -10725,7 +10725,7 @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
save_cpu_state(ctx, 0);
gen_helper_raise_exception_debug(cpu_env);
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr(cpu_PC);
break;
default:
fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index cfec479..2f3c2e5 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -164,7 +164,7 @@ static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
- tcg_gen_exit_tb((tcg_target_long)tb + n);
+ tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
tcg_gen_exit_tb(0);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 401e10e..d10808d 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1205,6 +1205,7 @@ struct PowerPCCPU {
uint32_t compat_pvr;
PPCVirtualHypervisor *vhyp;
Object *intc;
+ int32_t node_id; /* NUMA node this CPU belongs to */
/* Fields related to migration compatibility hacks */
bool pre_2_8_migration;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index a69005d..accef03 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -32,7 +32,6 @@
#include "qemu/error-report.h"
#include "trace.h"
#include "qapi/visitor.h"
-#include "migration/vmstate.h"
#include "exec/exec-all.h"
#ifndef CONFIG_USER_ONLY
#include "hw/hw.h"
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index c74b419..a4d31df 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -107,6 +107,8 @@ typedef struct CPUS390XState {
uint64_t cc_dst;
uint64_t cc_vr;
+ uint64_t ex_value;
+
uint64_t __excp_addr;
uint64_t psa;
@@ -393,7 +395,7 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->psw.addr;
- *cs_base = 0;
+ *cs_base = env->ex_value;
*flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
}
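Folding ex_value into *cs_base is deliberate: TBs are looked up and matched on the (pc, cs_base, flags) triple, so code translated while an EXECUTE target is pending (ex_value != 0) can never be confused with an ordinary translation at the same PSW address. A sketch of the matching idea, with a hypothetical comparison function:

    /* Illustration only: how ex_value-in-cs_base keeps TBs distinct. */
    static bool tb_matches_sketch(const TranslationBlock *tb, uint64_t pc,
                                  uint64_t ex_value, uint32_t flags)
    {
        return tb->pc == pc && tb->cs_base == ex_value && tb->flags == flags;
    }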
@@ -1033,6 +1035,8 @@ struct sysib_322 {
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define VADDR_PX 0xff000 /* page index bits */
+
#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_RES0 0x800 /* bit must be zero */
@@ -1084,6 +1088,7 @@ struct sysib_322 {
#define SIGP_ORDER_MASK 0x000000ff
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index b6220c8..b34318f 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -667,6 +667,32 @@ static void check_compatibility(const S390CPUModel *max_model,
"available in the configuration: ");
}
+/**
+ * The base TCG CPU model "qemu" is based on the z900. However, we can
+ * already emulate some additional features of later CPU generations, so
+ * we add those additional feature bits here.
+ */
+static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
+{
+ static const int feats[] = {
+ S390_FEAT_STFLE,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_STFLE_45,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(feats); i++) {
+ set_bit(feats[i], fbm);
+ }
+}
+
static S390CPUModel *get_max_cpu_model(Error **errp)
{
static S390CPUModel max_model;
@@ -679,10 +705,11 @@ static S390CPUModel *get_max_cpu_model(Error **errp)
if (kvm_enabled()) {
kvm_s390_get_host_cpu_model(&max_model, errp);
} else {
- /* TCG emulates a z900 */
+ /* TCG emulates a z900 (with some optional additional features) */
max_model.def = &s390_cpu_defs[0];
bitmap_copy(max_model.features, max_model.def->default_feat,
S390_FEAT_MAX);
+ add_qemu_cpu_model_features(max_model.features);
}
if (!*errp) {
cached = true;
@@ -935,11 +962,14 @@ static void s390_host_cpu_model_initfn(Object *obj)
static void s390_qemu_cpu_model_initfn(Object *obj)
{
+ static S390CPUDef s390_qemu_cpu_defs;
S390CPU *cpu = S390_CPU(obj);
cpu->model = g_malloc0(sizeof(*cpu->model));
- /* TCG emulates a z900 */
- cpu->model->def = &s390_cpu_defs[0];
+ /* TCG emulates a z900 (with some optional additional features) */
+ memcpy(&s390_qemu_cpu_defs, &s390_cpu_defs[0], sizeof(s390_qemu_cpu_defs));
+ add_qemu_cpu_model_features(s390_qemu_cpu_defs.full_feat);
+ cpu->model->def = &s390_qemu_cpu_defs;
bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
S390_FEAT_MAX);
}
diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c
index e604e9f..26f124f 100644
--- a/target/s390x/fpu_helper.c
+++ b/target/s390x/fpu_helper.c
@@ -585,6 +585,33 @@ uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m3)
return RET128(ret);
}
+/* 32-bit FP compare and signal */
+uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float32_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare and signal */
+uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float64_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare and signal */
+uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ int cmp = float128_compare(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
/* 32-bit FP multiply and add */
uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
uint64_t f2, uint64_t f3)
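The new keb/kdb/kxb helpers differ from the existing ceb/cdb/cxb ones only in NaN handling: softfloat's float*_compare() raises the IEEE invalid exception for any NaN operand ("compare and signal"), whereas the *_compare_quiet() variants raise it only for signaling NaNs. A minimal illustration, assuming QEMU's softfloat API:

    float_status st = { 0 };
    float32 qnan = make_float32(0x7fc00000);         /* a quiet NaN */

    float32_compare_quiet(qnan, float32_zero, &st);  /* invalid flag not set */
    float32_compare(qnan, float32_zero, &st);        /* sets float_flag_invalid */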
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 4f8aadf..a8d20c5 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -204,7 +204,7 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
if (raddr > ram_size) {
DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
(uint64_t)raddr, (uint64_t)ram_size);
- trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
+ trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER_INC);
return 1;
}
@@ -642,6 +642,11 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
+ if (env->ex_value) {
+ /* Execution of the target insn is indivisible from
+ the parent EXECUTE insn. */
+ return false;
+ }
if (env->psw.mask & PSW_MASK_EXT) {
s390_cpu_do_interrupt(cs);
return true;
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 0b70770..69249a5 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -3,8 +3,10 @@ DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
DEF_HELPER_3(mvcl, i32, env, i32, i32)
+DEF_HELPER_3(clcl, i32, env, i32, i32)
DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
@@ -12,13 +14,18 @@ DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_4(srst, i64, env, i64, i64, i64)
DEF_HELPER_4(clst, i64, env, i64, i64, i64)
-DEF_HELPER_4(mvpg, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_4(mvst, i64, env, i64, i64, i64)
-DEF_HELPER_5(ex, i32, env, i32, i64, i64, i64)
+DEF_HELPER_4(ex, void, env, i32, i64, i64)
DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_4(mvcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(mvclu, i32, env, i32, i64, i32)
DEF_HELPER_4(clcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(clclu, i32, env, i32, i64, i32)
DEF_HELPER_3(cegb, i64, env, s64, i32)
DEF_HELPER_3(cdgb, i64, env, s64, i32)
DEF_HELPER_3(cxgb, i64, env, s64, i32)
@@ -49,6 +56,9 @@ DEF_HELPER_FLAGS_3(lexb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64)
DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
@@ -75,10 +85,17 @@ DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32)
+DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(unpka, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_4(unpku, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_3(tp, TCG_CALL_NO_WG, i32, env, i64, i32)
DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64)
DEF_HELPER_4(tre, i64, env, i64, i64, i64)
DEF_HELPER_4(trt, i32, env, i32, i64, i64)
+DEF_HELPER_5(trXX, i32, env, i32, i32, i32, i32)
DEF_HELPER_4(cksm, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
@@ -86,6 +103,8 @@ DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(stfle, i32, env, i64)
+DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@@ -102,17 +121,18 @@ DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64)
DEF_HELPER_FLAGS_2(tprot, TCG_CALL_NO_RWG, i32, i64, i64)
DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64)
DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
-DEF_HELPER_3(csp, i32, env, i32, i64)
DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
DEF_HELPER_4(sigp, i32, env, i64, i32, i64)
DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
-DEF_HELPER_FLAGS_3(ipte, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(lra, i64, env, i64)
DEF_HELPER_FLAGS_2(lura, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(lurag, TCG_CALL_NO_WG, i64, env, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 55a7c52..73dd05d 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -154,6 +154,12 @@
C(0xb349, CXBR, RRE, Z, x1_o, x2_o, 0, 0, cxb, 0)
C(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0)
C(0xed19, CDB, RXE, Z, f1_o, m2_64, 0, 0, cdb, 0)
+/* COMPARE AND SIGNAL */
+ C(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0)
+ C(0xb318, KDBR, RRE, Z, f1_o, f2_o, 0, 0, kdb, 0)
+ C(0xb348, KXBR, RRE, Z, x1_o, x2_o, 0, 0, kxb, 0)
+ C(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0)
+ C(0xed18, KDB, RXE, Z, f1_o, m2_64, 0, 0, kdb, 0)
/* COMPARE IMMEDIATE */
C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32)
C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64)
@@ -210,8 +216,12 @@
C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64)
C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32)
C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL LONG */
+ C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0)
/* COMPARE LOGICAL LONG EXTENDED */
C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0)
+/* COMPARE LOGICAL LONG UNICODE */
+ C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0)
/* COMPARE LOGICAL CHARACTERS UNDER MASK */
C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0)
C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0)
@@ -327,9 +337,9 @@
C(0xeb57, XIY, SIY, LD, m1_8u, i2_8u, new, m1_8, xor, nz64)
/* EXECUTE */
- C(0x4400, EX, RX_a, Z, r1_o, a2, 0, 0, ex, 0)
+ C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0)
/* EXECUTE RELATIVE LONG */
- C(0xc600, EXRL, RIL_b, EE, r1_o, ri2, 0, 0, ex, 0)
+ C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0)
/* EXTRACT ACCESS */
C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
@@ -507,6 +517,8 @@
/* LOAD PAIR DISJOINT */
D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
+/* LOAD PAIR FROM QUADWORD */
+ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0)
/* LOAD POSITIVE */
C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32)
C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64)
@@ -564,14 +576,26 @@
C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0)
C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0)
C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0)
+/* MOVE INVERSE */
+ C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0)
/* MOVE LONG */
C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0)
/* MOVE LONG EXTENDED */
C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0)
+/* MOVE LONG UNICODE */
+ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0)
+/* MOVE NUMERICS */
+ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0)
/* MOVE PAGE */
C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0)
/* MOVE STRING */
C(0xb255, MVST, RRE, Z, r1_o, r2_o, 0, 0, mvst, 0)
+/* MOVE WITH OFFSET */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well keep the single 8-bit length field. */
+ C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0)
+/* MOVE ZONES */
+ C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0)
/* MULTIPLY */
C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0)
@@ -639,6 +663,15 @@
C(0x9600, OI, SI, Z, m1_8u, i2_8u, new, m1_8, or, nz64)
C(0xeb56, OIY, SIY, LD, m1_8u, i2_8u, new, m1_8, or, nz64)
+/* PACK */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well keep the single 8-bit length field. */
+ C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0)
+/* PACK ASCII */
+ C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0)
+/* PACK UNICODE */
+ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0)
+
/* PREFETCH */
/* Implemented as nops of course. */
C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0)
@@ -763,6 +796,8 @@
/* STORE ACCESS MULTIPLE */
C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0)
C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0)
+/* STORE PAIR TO QUADWORD */
+ C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0)
/* SUBTRACT */
C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32)
@@ -810,11 +845,20 @@
/* SUPERVISOR CALL */
C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0)
+/* TEST ADDRESSING MODE */
+ C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0)
+
+/* TEST AND SET */
+ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0)
+
/* TEST DATA CLASS */
C(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0)
C(0xed11, TCDB, RXE, Z, f1_o, a2, 0, 0, tcdb, 0)
C(0xed12, TCXB, RXE, Z, x1_o, a2, 0, 0, tcxb, 0)
+/* TEST DECIMAL */
+ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0)
+
/* TEST UNDER MASK */
C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32)
C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32)
@@ -830,14 +874,28 @@
/* TRANSLATE EXTENDED */
C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0)
+/* TRANSLATE ONE TO ONE */
+ C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE ONE TO TWO */
+ C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO ONE */
+ C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO TWO */
+ C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+
/* UNPACK */
 /* Really format SS_b, but we pack both lengths into one argument
    for the helper call, so we might as well keep the single 8-bit length field. */
C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0)
+/* UNPACK ASCII */
+ C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0)
+/* UNPACK UNICODE */
+ C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
- C(0xb250, CSP, RRE, Z, 0, ra2, 0, 0, csp, 0)
+ D(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL)
+ D(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ)
/* DIAGNOSE (KVM hypercall) */
C(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0)
/* INSERT STORAGE KEY EXTENDED */
@@ -918,6 +976,8 @@
/* STORE USING REAL ADDRESS */
C(0xb246, STURA, RRE, Z, r1_o, r2_o, 0, 0, stura, 0)
C(0xb925, STURG, RRE, Z, r1_o, r2_o, 0, 0, sturg, 0)
+/* TEST BLOCK */
+ C(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0)
/* TEST PROTECTION */
C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0)
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
index 8503fa1..8f908bb 100644
--- a/target/s390x/machine.c
+++ b/target/s390x/machine.c
@@ -34,6 +34,7 @@ static int cpu_post_load(void *opaque, int version_id)
return 0;
}
+
static void cpu_pre_save(void *opaque)
{
S390CPU *cpu = opaque;
@@ -156,6 +157,23 @@ const VMStateDescription vmstate_riccb = {
}
};
+static bool exval_needed(void *opaque)
+{
+ S390CPU *cpu = opaque;
+ return cpu->env.ex_value != 0;
+}
+
+const VMStateDescription vmstate_exval = {
+ .name = "cpu/exval",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = exval_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.ex_value, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_s390_cpu = {
.name = "cpu",
.post_load = cpu_post_load,
@@ -188,6 +206,7 @@ const VMStateDescription vmstate_s390_cpu = {
&vmstate_fpu,
&vmstate_vregs,
&vmstate_riccb,
+ &vmstate_exval,
NULL
},
};
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index f6e5bce..80caab9 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
@@ -40,15 +41,9 @@
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- int ret;
-
- ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ int ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret != 0)) {
- if (likely(retaddr)) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
}
@@ -62,18 +57,61 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
#endif
/* Reduce the length so that addr + len doesn't cross a page boundary. */
-static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
+static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
- return -addr & ~TARGET_PAGE_MASK;
+ return -(addr | TARGET_PAGE_MASK);
}
#endif
return len;
}
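Besides narrowing the types to match the new uint32_t lengths, the rewritten return expression appears to fix the page-aligned corner case: the old form "-addr & ~TARGET_PAGE_MASK" yields 0 when addr is already page-aligned, while the new form yields a whole page. A worked example with 4 KiB pages (stand-in macro, sketch only):

    #include <stdint.h>
    #include <assert.h>

    #define PAGE_MASK (~(uint64_t)0xfff)   /* stand-in for TARGET_PAGE_MASK */

    int main(void)
    {
        uint64_t addr = 0x1000;                 /* page-aligned */
        assert((-addr & ~PAGE_MASK) == 0);      /* old form: nothing to copy */
        assert(-(addr | PAGE_MASK) == 0x1000);  /* new form: one full page */

        addr = 0x1ffe;                          /* 2 bytes before a boundary */
        assert((-addr & ~PAGE_MASK) == 2);      /* both forms agree here */
        assert(-(addr | PAGE_MASK) == 2);
        return 0;
    }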
+/* Trigger a SPECIFICATION exception if an address or a length is not
+ naturally aligned. */
+static inline void check_alignment(CPUS390XState *env, uint64_t v,
+ int wordsize, uintptr_t ra)
+{
+ if (v % wordsize) {
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ }
+}
+
+/* Load a value from memory according to its size. */
+static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
+ int wordsize, uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ return cpu_ldub_data_ra(env, addr, ra);
+ case 2:
+ return cpu_lduw_data_ra(env, addr, ra);
+ default:
+ abort();
+ }
+}
+
+/* Store a value to memory according to its size. */
+static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
+ uint64_t value, int wordsize,
+ uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ cpu_stb_data_ra(env, addr, value, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr, value, ra);
+ break;
+ default:
+ abort();
+ }
+}
+
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
- uint32_t l)
+ uint32_t l, uintptr_t ra)
{
int mmu_idx = cpu_mmu_index(env, false);
@@ -81,14 +119,14 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
if (p) {
/* Access to the whole page in write mode granted. */
- int l_adj = adj_len_to_page(l, dest);
+ uint32_t l_adj = adj_len_to_page(l, dest);
memset(p, byte, l_adj);
dest += l_adj;
l -= l_adj;
} else {
/* We failed to get access to the whole page. The next write
access will likely fill the QEMU TLB for the next iteration. */
- cpu_stb_data(env, dest, byte);
+ cpu_stb_data_ra(env, dest, byte, ra);
dest++;
l--;
}
@@ -96,7 +134,7 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
}
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
- uint32_t l)
+ uint32_t l, uintptr_t ra)
{
int mmu_idx = cpu_mmu_index(env, false);
@@ -105,7 +143,7 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
if (src_p && dest_p) {
/* Access to both whole pages granted. */
- int l_adj = adj_len_to_page(l, src);
+ uint32_t l_adj = adj_len_to_page(l, src);
l_adj = adj_len_to_page(l_adj, dest);
memmove(dest_p, src_p, l_adj);
src += l_adj;
@@ -115,7 +153,7 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
/* We failed to get access to one or both whole pages. The next
read or write access will likely fill the QEMU TLB for the
next iteration. */
- cpu_stb_data(env, dest, cpu_ldub_data(env, src));
+ cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
src++;
dest++;
l--;
@@ -124,140 +162,233 @@ static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
}
/* and on array */
-uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
- uint64_t src)
+static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x &= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
}
-/* xor on array */
-uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ return do_helper_nc(env, l, dest, src, GETPC());
+}
+
+/* xor on array */
+static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
/* xor with itself is the same as memset(0) */
if (src == dest) {
- fast_memset(env, dest, 0, l + 1);
+ fast_memset(env, dest, 0, l + 1, ra);
return 0;
}
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x ^= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
}
-/* or on array */
-uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src)
{
- int i;
- unsigned char x;
- uint32_t cc = 0;
+ return do_helper_xc(env, l, dest, src, GETPC());
+}
+
+/* or on array */
+static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ uint32_t i;
+ uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
- if (x) {
- cc = 1;
- }
- cpu_stb_data(env, dest + i, x);
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ x |= cpu_ldub_data_ra(env, dest + i, ra);
+ c |= x;
+ cpu_stb_data_ra(env, dest + i, x, ra);
}
- return cc;
+ return c != 0;
+}
+
+uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ return do_helper_oc(env, l, dest, src, GETPC());
}
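A recurring pattern throughout this mem_helper.c rewrite: GETPC() is captured once in the outermost helper (the function actually called from generated code) and threaded down as "ra", so the *_data_ra accessors can unwind a TLB fault back to the guest instruction without the old explicit cpu_restore_state() dance. The shape of it, with a hypothetical helper:

    /* Illustration only (hypothetical helper name). */
    uint32_t HELPER(example)(CPUS390XState *env, uint64_t addr)
    {
        uintptr_t ra = GETPC();    /* only valid in the outermost helper */
        return cpu_ldub_data_ra(env, addr, ra);  /* faults unwind to the insn */
    }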
/* memmove */
-void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
{
- int i = 0;
+ uint32_t i;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
+ /* mvc and memmove do not behave the same when areas overlap! */
/* mvc with source pointing to the byte after the destination is the
same as memset with the first source byte */
- if (dest == (src + 1)) {
- fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
- return;
+ if (dest == src + 1) {
+ fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
+ } else if (dest < src || src + l < dest) {
+ fast_memmove(env, dest, src, l + 1, ra);
+ } else {
+ /* slow version with byte accesses which always work */
+ for (i = 0; i <= l; i++) {
+ uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
+ cpu_stb_data_ra(env, dest + i, x, ra);
+ }
}
- /* mvc and memmove do not behave the same when areas overlap! */
- if ((dest < src) || (src + l < dest)) {
- fast_memmove(env, dest, src, l + 1);
- return;
+ return env->cc_op;
+}
+
+void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ do_helper_mvc(env, l, dest, src, GETPC());
+}
+
+/* move inverse */
+void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ for (i = 0; i <= l; i++) {
+ uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
+ cpu_stb_data_ra(env, dest + i, v, ra);
}
+}
+
+/* move numerics */
+void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
- /* slow version with byte accesses which always work */
for (i = 0; i <= l; i++) {
- cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
+ uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
+ v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
+ cpu_stb_data_ra(env, dest + i, v, ra);
}
}
-/* compare unsigned byte arrays */
-uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+/* move with offset */
+void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ uintptr_t ra = GETPC();
+ int len_dest = l >> 4;
+ int len_src = l & 0xf;
+ uint8_t byte_dest, byte_src;
int i;
- unsigned char x, y;
- uint32_t cc;
+
+ src += len_src;
+ dest += len_dest;
+
+ /* Handle rightmost byte */
+ byte_src = cpu_ldub_data_ra(env, src, ra);
+ byte_dest = cpu_ldub_data_ra(env, dest, ra);
+ byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
+ cpu_stb_data_ra(env, dest, byte_dest, ra);
+
+ /* Process remaining bytes from right to left */
+ for (i = 1; i <= len_dest; i++) {
+ byte_dest = byte_src >> 4;
+ if (len_src - i >= 0) {
+ byte_src = cpu_ldub_data_ra(env, src - i, ra);
+ } else {
+ byte_src = 0;
+ }
+ byte_dest |= byte_src << 4;
+ cpu_stb_data_ra(env, dest - i, byte_dest, ra);
+ }
+}
+
+/* move zones */
+void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ for (i = 0; i <= l; i++) {
+ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
+ b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
+ cpu_stb_data_ra(env, dest + i, b, ra);
+ }
+}
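MVN and MVZ above operate on the two nibbles of zoned-decimal bytes: MVN copies the digit (low) nibble from source to destination, MVZ the zone (high) nibble. A one-byte worked example matching the masks in the code:

    uint8_t dest = 0xF1, src = 0xC7;
    uint8_t after_mvn = (dest & 0xf0) | (src & 0x0f);  /* 0xF7: digit moved */
    uint8_t after_mvz = (dest & 0x0f) | (src & 0xf0);  /* 0xC1: zone moved  */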
+
+/* compare unsigned byte arrays */
+static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
+ uint64_t s2, uintptr_t ra)
+{
+ uint32_t i;
+ uint32_t cc = 0;
HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
__func__, l, s1, s2);
+
for (i = 0; i <= l; i++) {
- x = cpu_ldub_data(env, s1 + i);
- y = cpu_ldub_data(env, s2 + i);
+ uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
+ uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
if (x < y) {
cc = 1;
- goto done;
+ break;
} else if (x > y) {
cc = 2;
- goto done;
+ break;
}
}
- cc = 0;
- done:
+
HELPER_LOG("\n");
return cc;
}
+uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+{
+ return do_helper_clc(env, l, s1, s2, GETPC());
+}
+
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
uint64_t addr)
{
- uint8_t r, d;
- uint32_t cc;
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
mask, addr);
- cc = 0;
+
while (mask) {
if (mask & 8) {
- d = cpu_ldub_data(env, addr);
- r = (r1 & 0xff000000UL) >> 24;
+ uint8_t d = cpu_ldub_data_ra(env, addr, ra);
+ uint8_t r = extract32(r1, 24, 8);
HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
addr);
if (r < d) {
@@ -272,45 +403,88 @@ uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
mask = (mask << 1) & 0xf;
r1 <<= 8;
}
+
HELPER_LOG("\n");
return cc;
}
-static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
+static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
{
- /* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
- a &= 0x7fffffff;
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode */
+ a &= 0x00ffffff;
+ } else {
+ /* 31-Bit mode */
+ a &= 0x7fffffff;
+ }
}
return a;
}
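wrap_address() (renamed from fix_address) now handles all three PSW addressing modes: 64-bit addresses pass through, 31-bit addresses are masked to 0x7fffffff, and the newly covered 24-bit mode masks to 0x00ffffff. For a value like 0x87654321 (sketch):

    uint64_t a = 0x87654321;
    /* 64-bit mode: a              -> 0x87654321 */
    /* 31-bit mode: a & 0x7fffffff -> 0x07654321 */
    /* 24-bit mode: a & 0x00ffffff -> 0x00654321 */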
-static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
+static inline uint64_t get_address(CPUS390XState *env, int reg)
+{
+ return wrap_address(env, env->regs[reg]);
+}
+
+static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
- uint64_t r = d2;
- if (x2) {
- r += env->regs[x2];
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = address;
+ } else {
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode. According to the PoO it is implementation
+ dependent if bits 32-39 remain unchanged or are set to
+ zeros. Choose the former so that the function can also be
+ used for TRT. */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
+ } else {
+ /* 31-Bit mode. According to the PoO it is implementation
+ dependent if bit 32 remains unchanged or is set to zero.
+ Choose the latter so that the function can also be used for
+ TRT. */
+ address &= 0x7fffffff;
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
+ }
}
- if (b2) {
- r += env->regs[b2];
+}
+
+static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
+{
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ /* 24-Bit and 31-Bit mode */
+ length &= 0x7fffffff;
}
- return fix_address(env, r);
+ return length;
}
-static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
+static inline uint64_t get_length(CPUS390XState *env, int reg)
{
- return fix_address(env, env->regs[reg]);
+ return wrap_length(env, env->regs[reg]);
+}
+
+static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
+{
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = length;
+ } else {
+ /* 24-Bit and 31-Bit mode */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
+ }
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
uint64_t str)
{
+ uintptr_t ra = GETPC();
uint32_t len;
uint8_t v, c = r0;
- str = fix_address(env, str);
- end = fix_address(env, end);
+ str = wrap_address(env, str);
+ end = wrap_address(env, end);
/* Assume for now that R2 is unmodified. */
env->retxl = str;
@@ -323,7 +497,7 @@ uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
env->cc_op = 2;
return end;
}
- v = cpu_ldub_data(env, str + len);
+ v = cpu_ldub_data_ra(env, str + len, ra);
if (v == c) {
/* Character found. Set R1 to the location; R2 is unmodified. */
env->cc_op = 1;
@@ -340,17 +514,18 @@ uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
+ uintptr_t ra = GETPC();
uint32_t len;
c = c & 0xff;
- s1 = fix_address(env, s1);
- s2 = fix_address(env, s2);
+ s1 = wrap_address(env, s1);
+ s2 = wrap_address(env, s2);
/* Lest we fail to service interrupts in a timely manner, limit the
amount of work we're willing to do. For now, let's cap at 8k. */
for (len = 0; len < 0x2000; ++len) {
- uint8_t v1 = cpu_ldub_data(env, s1 + len);
- uint8_t v2 = cpu_ldub_data(env, s2 + len);
+ uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
+ uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
if (v1 == v2) {
if (v1 == c) {
/* Equal. CC=0, and don't advance the registers. */
@@ -375,27 +550,29 @@ uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
}
/* move page */
-void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
- /* XXX missing r0 handling */
- env->cc_op = 0;
- fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
+ /* ??? missing r0 handling, which includes access keys, but more
+ importantly optional suppression of the exception! */
+ fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
+ return 0; /* data moved */
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
+ uintptr_t ra = GETPC();
uint32_t len;
c = c & 0xff;
- d = fix_address(env, d);
- s = fix_address(env, s);
+ d = wrap_address(env, d);
+ s = wrap_address(env, s);
/* Lest we fail to service interrupts in a timely manner, limit the
amount of work we're willing to do. For now, let's cap at 8k. */
for (len = 0; len < 0x2000; ++len) {
- uint8_t v = cpu_ldub_data(env, s + len);
- cpu_stb_data(env, d + len, v);
+ uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
+ cpu_stb_data_ra(env, d + len, v, ra);
if (v == c) {
/* Complete. Set CC=1 and advance R1. */
env->cc_op = 1;
@@ -410,124 +587,14 @@ uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
return d + len;
}
-static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
- uint32_t mask)
-{
- int pos = 24; /* top of the lower half of r1 */
- uint64_t rmask = 0xff000000ULL;
- uint8_t val = 0;
- int ccd = 0;
- uint32_t cc = 0;
-
- while (mask) {
- if (mask & 8) {
- env->regs[r1] &= ~rmask;
- val = cpu_ldub_data(env, address);
- if ((val & 0x80) && !ccd) {
- cc = 1;
- }
- ccd = 1;
- if (val && cc == 0) {
- cc = 2;
- }
- env->regs[r1] |= (uint64_t)val << pos;
- address++;
- }
- mask = (mask << 1) & 0xf;
- pos -= 8;
- rmask >>= 8;
- }
-
- return cc;
-}
-
-/* execute instruction
- this instruction executes an insn modified with the contents of r1
- it does not change the executed instruction in memory
- it does not change the program counter
- in other words: tricky...
- currently implemented by interpreting the cases it is most commonly used in
-*/
-uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
- uint64_t addr, uint64_t ret)
-{
- S390CPU *cpu = s390_env_get_cpu(env);
- uint16_t insn = cpu_lduw_code(env, addr);
-
- HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
- insn);
- if ((insn & 0xf0ff) == 0xd000) {
- uint32_t l, insn2, b1, b2, d1, d2;
-
- l = v1 & 0xff;
- insn2 = cpu_ldl_code(env, addr + 2);
- b1 = (insn2 >> 28) & 0xf;
- b2 = (insn2 >> 12) & 0xf;
- d1 = (insn2 >> 16) & 0xfff;
- d2 = insn2 & 0xfff;
- switch (insn & 0xf00) {
- case 0x200:
- helper_mvc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x400:
- cc = helper_nc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x500:
- cc = helper_clc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x600:
- cc = helper_oc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0x700:
- cc = helper_xc(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0xc00:
- helper_tr(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- case 0xd00:
- cc = helper_trt(env, l, get_address(env, 0, b1, d1),
- get_address(env, 0, b2, d2));
- break;
- default:
- goto abort;
- }
- } else if ((insn & 0xff00) == 0x0a00) {
- /* supervisor call */
- HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
- env->psw.addr = ret - 4;
- env->int_svc_code = (insn | v1) & 0xff;
- env->int_svc_ilen = 4;
- helper_exception(env, EXCP_SVC);
- } else if ((insn & 0xff00) == 0xbf00) {
- uint32_t insn2, r1, r3, b2, d2;
-
- insn2 = cpu_ldl_code(env, addr + 2);
- r1 = (insn2 >> 20) & 0xf;
- r3 = (insn2 >> 16) & 0xf;
- b2 = (insn2 >> 12) & 0xf;
- d2 = insn2 & 0xfff;
- cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
- } else {
- abort:
- cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
- insn);
- }
- return cc;
-}
-
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
int i;
for (i = r1;; i = (i + 1) % 16) {
- env->aregs[i] = cpu_ldl_data(env, a2);
+ env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
a2 += 4;
if (i == r3) {
@@ -539,10 +606,11 @@ void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
int i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stl_data(env, a2, env->aregs[i]);
+ cpu_stl_data_ra(env, a2, env->aregs[i], ra);
a2 += 4;
if (i == r3) {
@@ -551,131 +619,230 @@ void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
-/* move long */
-uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+/* move long helper */
+static inline uint32_t do_mvcl(CPUS390XState *env,
+ uint64_t *dest, uint64_t *destlen,
+ uint64_t *src, uint64_t *srclen,
+ uint16_t pad, int wordsize, uintptr_t ra)
{
- uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
- uint64_t dest = get_address_31fix(env, r1);
- uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
- uint64_t src = get_address_31fix(env, r2);
- uint8_t pad = env->regs[r2 + 1] >> 24;
- uint8_t v;
+ uint64_t len = MIN(*srclen, *destlen);
uint32_t cc;
- if (destlen == srclen) {
+ if (*destlen == *srclen) {
cc = 0;
- } else if (destlen < srclen) {
+ } else if (*destlen < *srclen) {
cc = 1;
} else {
cc = 2;
}
- if (srclen > destlen) {
- srclen = destlen;
- }
+ /* Copy the src array */
+ fast_memmove(env, *dest, *src, len, ra);
+ *src += len;
+ *srclen -= len;
+ *dest += len;
+ *destlen -= len;
- for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
- v = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, v);
+ /* Pad the remaining area */
+ if (wordsize == 1) {
+ fast_memset(env, *dest, pad, *destlen, ra);
+ *dest += *destlen;
+ *destlen = 0;
+ } else {
+ /* If the remaining length is odd, store one byte of the pad first. */
+ if (*destlen & 1) {
+ cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
+ *dest += 1;
+ *destlen -= 1;
+ }
+ /* The remaining length is now even; pad a halfword at a time. */
+ for (; *destlen; *dest += 2, *destlen -= 2) {
+ cpu_stw_data_ra(env, *dest, pad, ra);
+ }
}
- for (; destlen; dest++, destlen--) {
- cpu_stb_data(env, dest, pad);
- }
+ return cc;
+}
+
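+/* Example: with *destlen = 6, *srclen = 3 and pad = 0x20, three bytes
+ are copied, the remaining three destination bytes are set to 0x20,
+ and the CC is 2 because the destination was the longer operand. For
+ wordsize == 2 (MVCLU) an odd remainder receives the low pad byte
+ first, then full halfwords of the 16-bit pad. */
+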
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint32_t cc;
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- env->regs[r2 + 1] -= src - env->regs[r2];
- env->regs[r1] = dest;
- env->regs[r2] = src;
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
+
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r2, src);
return cc;
}
-/* move long extended another memcopy insn with more bells and whistles */
+/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
uint32_t r3)
{
- uint64_t destlen = env->regs[r1 + 1];
- uint64_t dest = env->regs[r1];
- uint64_t srclen = env->regs[r3 + 1];
- uint64_t src = env->regs[r3];
- uint8_t pad = a2 & 0xff;
- uint8_t v;
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint8_t pad = a2;
uint32_t cc;
- if (!(env->psw.mask & PSW_MASK_64)) {
- destlen = (uint32_t)destlen;
- srclen = (uint32_t)srclen;
- dest &= 0x7fffffff;
- src &= 0x7fffffff;
- }
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
- if (destlen == srclen) {
- cc = 0;
- } else if (destlen < srclen) {
- cc = 1;
- } else {
- cc = 2;
- }
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
- if (srclen > destlen) {
- srclen = destlen;
- }
+ return cc;
+}
- for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
- v = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, v);
- }
+/* move long unicode */
+uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint16_t pad = a2;
+ uint32_t cc;
- for (; destlen; dest++, destlen--) {
- cpu_stb_data(env, dest, pad);
- }
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- /* FIXME: 31-bit mode! */
- env->regs[r3 + 1] -= src - env->regs[r3];
- env->regs[r1] = dest;
- env->regs[r3] = src;
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
return cc;
}
-/* compare logical long extended memcompare insn with padding */
-uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
- uint32_t r3)
+/* compare logical long helper */
+static inline uint32_t do_clcl(CPUS390XState *env,
+ uint64_t *src1, uint64_t *src1len,
+ uint64_t *src3, uint64_t *src3len,
+ uint16_t pad, uint64_t limit,
+ int wordsize, uintptr_t ra)
{
- uint64_t destlen = env->regs[r1 + 1];
- uint64_t dest = get_address_31fix(env, r1);
- uint64_t srclen = env->regs[r3 + 1];
- uint64_t src = get_address_31fix(env, r3);
- uint8_t pad = a2 & 0xff;
- uint8_t v1 = 0, v2 = 0;
+ uint64_t len = MAX(*src1len, *src3len);
uint32_t cc = 0;
- if (!(destlen || srclen)) {
+ check_alignment(env, *src1len | *src3len, wordsize, ra);
+
+ if (!len) {
return cc;
}
- if (srclen > destlen) {
- srclen = destlen;
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. */
+ if (len > limit) {
+ len = limit;
+ cc = 3;
}
- for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
- v1 = srclen ? cpu_ldub_data(env, src) : pad;
- v2 = destlen ? cpu_ldub_data(env, dest) : pad;
- if (v1 != v2) {
- cc = (v1 < v2) ? 1 : 2;
+ for (; len; len -= wordsize) {
+ uint16_t v1 = pad;
+ uint16_t v3 = pad;
+
+ if (*src1len) {
+ v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
+ }
+ if (*src3len) {
+ v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
+ }
+
+ if (v1 != v3) {
+ cc = (v1 < v3) ? 1 : 2;
break;
}
+
+ if (*src1len) {
+ *src1 += wordsize;
+ *src1len -= wordsize;
+ }
+ if (*src3len) {
+ *src3 += wordsize;
+ *src3len -= wordsize;
+ }
}
- env->regs[r1 + 1] = destlen;
- /* can't use srclen here, we trunc'ed it */
- env->regs[r3 + 1] -= src - env->regs[r3];
- env->regs[r1] = dest;
- env->regs[r3] = src;
+ return cc;
+}
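+
+/* Example: comparing "ab" (*src1len = 2) with "abc" (*src3len = 3) and
+ pad = 0x40 runs MAX(2, 3) = 3 steps; the final step compares the pad
+ byte 0x40 with 'c' and yields CC 1 (first operand low). When the
+ limit is reached first, CC 3 reports the comparison as incomplete so
+ the guest re-executes the instruction with the updated registers. */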
+
+/* compare logical long */
+uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
+ uint64_t src3 = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);
+
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r2, src3);
+
+ return cc;
+}
+
+/* compare logical long extended, a memcompare insn with padding */
+uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint8_t pad = a2;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
+
+ return cc;
+}
+
+/* compare logical long unicode, a memcompare insn with padding */
+uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint16_t pad = a2;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
return cc;
}
@@ -684,6 +851,7 @@ uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
uint64_t src, uint64_t src_len)
{
+ uintptr_t ra = GETPC();
uint64_t max_len, len;
uint64_t cksm = (uint32_t)r1;
@@ -693,21 +861,21 @@ uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
/* Process full words as available. */
for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
- cksm += (uint32_t)cpu_ldl_data(env, src);
+ cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
}
switch (max_len - len) {
case 1:
- cksm += cpu_ldub_data(env, src) << 24;
+ cksm += cpu_ldub_data_ra(env, src, ra) << 24;
len += 1;
break;
case 2:
- cksm += cpu_lduw_data(env, src) << 16;
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
len += 2;
break;
case 3:
- cksm += cpu_lduw_data(env, src) << 16;
- cksm += cpu_ldub_data(env, src + 2) << 8;
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
+ cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
len += 3;
break;
}
@@ -726,9 +894,94 @@ uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
return len;
}
+void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
+ src--;
+ len_src--;
+
+ /* now pack every value */
+ while (len_dest > 0) {
+ b = 0;
+
+ if (len_src >= 0) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src--;
+ len_src--;
+ }
+ if (len_src >= 0) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src--;
+ len_src--;
+ }
+
+ len_dest--;
+ dest--;
+ cpu_stb_data_ra(env, dest, b, ra);
+ }
+}
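+
+/* Example: PACK of the 3-byte zoned field f1 f2 f3 into a 2-byte
+ destination first stores the rightmost byte with its nibbles swapped
+ (f3 -> 3f), then packs one digit nibble per remaining source byte,
+ leaving the bytes 12 3f, i.e. the digits 123 with the zone F in the
+ sign position. */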
+
+static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen, int ssize, uintptr_t ra)
+{
+ int i;
+ /* The destination operand is always 16 bytes long. */
+ const int destlen = 16;
+
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - 1;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = 0;
+
+ /* Start with a positive sign */
+ if (i == 0) {
+ b = 0xc;
+ } else if (srclen > ssize) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ if (srclen > ssize) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ cpu_stb_data_ra(env, dest, b, ra);
+ dest--;
+ }
+}
+
+void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 1, GETPC());
+}
+
+void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 2, GETPC());
+}
+
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
uint64_t src)
{
+ uintptr_t ra = GETPC();
int len_dest = len >> 4;
int len_src = len & 0xf;
uint8_t b;
@@ -738,8 +991,8 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
src += len_src;
/* last byte is special, it only flips the nibbles */
- b = cpu_ldub_data(env, src);
- cpu_stb_data(env, dest, (b << 4) | (b >> 4));
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
src--;
len_src--;
@@ -749,7 +1002,7 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
uint8_t cur_byte = 0;
if (len_src > 0) {
- cur_byte = cpu_ldub_data(env, src);
+ cur_byte = cpu_ldub_data_ra(env, src, ra);
}
len_dest--;
@@ -768,29 +1021,124 @@ void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
/* zone bits */
cur_byte |= 0xf0;
- cpu_stb_data(env, dest, cur_byte);
+ cpu_stb_data_ra(env, dest, cur_byte, ra);
}
}
-void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
- uint64_t trans)
+static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
+ uint32_t destlen, int dsize, uint64_t src,
+ uintptr_t ra)
{
int i;
+ uint32_t cc;
+ uint8_t b;
+ /* The source operand is always 16 bytes long. */
+ const int srclen = 16;
- for (i = 0; i <= len; i++) {
- uint8_t byte = cpu_ldub_data(env, array + i);
- uint8_t new_byte = cpu_ldub_data(env, trans + byte);
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - dsize;
+
+ /* Check for the sign. */
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ switch (b & 0xf) {
+ case 0xa:
+ case 0xc:
+ case 0xe ... 0xf:
+ cc = 0; /* plus */
+ break;
+ case 0xb:
+ case 0xd:
+ cc = 1; /* minus */
+ break;
+ default:
+ case 0x0 ... 0x9:
+ cc = 3; /* invalid */
+ break;
+ }
- cpu_stb_data(env, array + i, new_byte);
+ /* Now expand the digits, one nibble at a time, into 0x30-based characters. */
+ for (i = 0; i < destlen; i += dsize) {
+ if (i == (31 * dsize)) {
+ /* If length is 32/64 bytes, the leftmost byte is 0. */
+ b = 0;
+ } else if (i % (2 * dsize)) {
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ } else {
+ b >>= 4;
+ }
+ cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
+ dest -= dsize;
}
+
+ return cc;
+}
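+
+/* Example: UNPKA of the packed value ...01 2c (digits 012, sign C)
+ into a 3-byte field stores the ASCII bytes 30 31 32 ("012") and
+ returns CC 0; a sign nibble of B or D would return CC 1 instead. */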
+
+uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 1, src, GETPC());
+}
+
+uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 2, src, GETPC());
+}
+
+uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
+{
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
+ int i;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
+ /* digit */
+ cc |= (b & 0xf0) > 0x90 ? 2 : 0;
+
+ if (i == (destlen - 1)) {
+ /* sign */
+ cc |= (b & 0xf) < 0xa ? 1 : 0;
+ } else {
+ /* digit */
+ cc |= (b & 0xf) > 0x9 ? 2 : 0;
+ }
+ }
+
+ return cc;
+}
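+
+/* Example: TP on the packed field 12 3c yields CC 0; 1a 3c yields CC 2
+ (invalid digit), 12 30 yields CC 1 (invalid sign), and 1a 30 yields
+ CC 3 (both defects). */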
+
+static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans, uintptr_t ra)
+{
+ uint32_t i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+ uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
+ }
+
+ return env->cc_op;
+}
+
+void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ do_helper_tr(env, len, array, trans, GETPC());
}
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
uint64_t len, uint64_t trans)
{
+ uintptr_t ra = GETPC();
uint8_t end = env->regs[0] & 0xff;
uint64_t l = len;
uint64_t i;
+ uint32_t cc = 0;
if (!(env->psw.mask & PSW_MASK_64)) {
array &= 0x7fffffff;
@@ -801,47 +1149,95 @@ uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
amount of work we're willing to do. For now, let's cap at 8k. */
if (l > 0x2000) {
l = 0x2000;
- env->cc_op = 3;
- } else {
- env->cc_op = 0;
+ cc = 3;
}
for (i = 0; i < l; i++) {
uint8_t byte, new_byte;
- byte = cpu_ldub_data(env, array + i);
+ byte = cpu_ldub_data_ra(env, array + i, ra);
if (byte == end) {
- env->cc_op = 1;
+ cc = 1;
break;
}
- new_byte = cpu_ldub_data(env, trans + byte);
- cpu_stb_data(env, array + i, new_byte);
+ new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
}
+ env->cc_op = cc;
env->retxl = len - i;
return array + i;
}
+static uint32_t do_helper_trt(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans, uintptr_t ra)
+{
+ uint32_t i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+ uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);
+
+ if (sbyte != 0) {
+ set_address(env, 1, array + i);
+ env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
+ return (i == len) ? 2 : 1;
+ }
+ }
+
+ return 0;
+}
+
uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
uint64_t trans)
{
- uint32_t cc = 0;
+ return do_helper_trt(env, len, array, trans, GETPC());
+}
+
+/* Translate one/two to one/two */
+uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
+ uint32_t tst, uint32_t sizes)
+{
+ uintptr_t ra = GETPC();
+ int dsize = (sizes & 1) ? 1 : 2;
+ int ssize = (sizes & 2) ? 1 : 2;
+ uint64_t tbl = get_address(env, 1) & ~7;
+ uint64_t dst = get_address(env, r1);
+ uint64_t len = get_length(env, r1 + 1);
+ uint64_t src = get_address(env, r2);
+ uint32_t cc = 3;
int i;
- for (i = 0; i <= len; i++) {
- uint8_t byte = cpu_ldub_data(env, array + i);
- uint8_t sbyte = cpu_ldub_data(env, trans + byte);
+ check_alignment(env, len, ssize, ra);
- if (sbyte != 0) {
- env->regs[1] = array + i;
- env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
- cc = (i == len) ? 2 : 1;
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. */
+ for (i = 0; i < 0x2000; i++) {
+ uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
+ uint64_t tble = tbl + (sval * dsize);
+ uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
+ if (dval == tst) {
+ cc = 1;
+ break;
+ }
+ cpu_stsize_data_ra(env, dst, dval, dsize, ra);
+
+ len -= ssize;
+ src += ssize;
+ dst += dsize;
+
+ if (len == 0) {
+ cc = 0;
break;
}
}
+ set_address(env, r1, dst);
+ set_length(env, r1 + 1, len);
+ set_address(env, r2, src);
+
return cc;
}
@@ -866,6 +1262,8 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
} else {
uint64_t oldh, oldl;
+ check_alignment(env, addr, 16, ra);
+
oldh = cpu_ldq_data_ra(env, addr + 0, ra);
oldl = cpu_ldq_data_ra(env, addr + 8, ra);
@@ -887,20 +1285,20 @@ void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
S390CPU *cpu = s390_env_get_cpu(env);
bool PERchanged = false;
- int i;
uint64_t src = a2;
- uint64_t val;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- val = cpu_ldq_data(env, src);
+ uint64_t val = cpu_ldq_data_ra(env, src, ra);
if (env->cregs[i] != val && i >= 9 && i <= 11) {
PERchanged = true;
}
env->cregs[i] = val;
HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
- i, src, env->cregs[i]);
+ i, src, val);
src += sizeof(uint64_t);
if (i == r3) {
@@ -917,18 +1315,19 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ uintptr_t ra = GETPC();
S390CPU *cpu = s390_env_get_cpu(env);
bool PERchanged = false;
- int i;
uint64_t src = a2;
- uint32_t val;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- val = cpu_ldl_data(env, src);
+ uint32_t val = cpu_ldl_data_ra(env, src, ra);
if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
PERchanged = true;
}
- env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
+ env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
src += sizeof(uint32_t);
if (i == r3) {
@@ -945,11 +1344,12 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
- int i;
+ uintptr_t ra = GETPC();
uint64_t dest = a2;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stq_data(env, dest, env->cregs[i]);
+ cpu_stq_data_ra(env, dest, env->cregs[i], ra);
dest += sizeof(uint64_t);
if (i == r3) {
@@ -960,11 +1360,12 @@ void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
- int i;
+ uintptr_t ra = GETPC();
uint64_t dest = a2;
+ uint32_t i;
for (i = r1;; i = (i + 1) % 16) {
- cpu_stl_data(env, dest, env->cregs[i]);
+ cpu_stl_data_ra(env, dest, env->cregs[i], ra);
dest += sizeof(uint32_t);
if (i == r3) {
@@ -973,10 +1374,39 @@ void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
+uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
+{
+ uintptr_t ra = GETPC();
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint64_t abs_addr;
+ int i;
+
+ real_addr = wrap_address(env, real_addr);
+ abs_addr = mmu_real2abs(env, real_addr) & TARGET_PAGE_MASK;
+ if (!address_space_access_valid(&address_space_memory, abs_addr,
+ TARGET_PAGE_SIZE, true)) {
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_ADDRESSING, 4);
+ return 1;
+ }
+
+ /* Check low-address protection */
+ if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
+ cpu_restore_state(cs, ra);
+ program_interrupt(env, PGM_PROTECTION, 4);
+ return 1;
+ }
+
+ for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
+ stq_phys(cs->as, abs_addr + i, 0);
+ }
+
+ return 0;
+}
+
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
/* XXX implement */
-
return 0;
}
@@ -985,7 +1415,7 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
- uint64_t addr = get_address(env, 0, 0, r2);
+ uint64_t addr = wrap_address(env, r2);
uint8_t key;
if (addr > ram_size) {
@@ -1008,7 +1438,7 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
- uint64_t addr = get_address(env, 0, 0, r2);
+ uint64_t addr = wrap_address(env, r2);
uint8_t key;
if (addr > ram_size) {
@@ -1063,32 +1493,9 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
return re >> 1;
}
-/* compare and swap and purge */
-uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
-{
- S390CPU *cpu = s390_env_get_cpu(env);
- uint32_t cc;
- uint32_t o1 = env->regs[r1];
- uint64_t a2 = r2 & ~3ULL;
- uint32_t o2 = cpu_ldl_data(env, a2);
-
- if (o1 == o2) {
- cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
- if (r2 & 0x3) {
- /* flush TLB / ALB */
- tlb_flush(CPU(cpu));
- }
- cc = 0;
- } else {
- env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
- cc = 1;
- }
-
- return cc;
-}
-
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
+ uintptr_t ra = GETPC();
int cc = 0, i;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
@@ -1102,7 +1509,8 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* XXX replace w/ memcpy */
for (i = 0; i < l; i++) {
- cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
+ uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
+ cpu_stb_secondary_ra(env, a1 + i, x, ra);
}
return cc;
@@ -1110,6 +1518,7 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
+ uintptr_t ra = GETPC();
int cc = 0, i;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
@@ -1123,36 +1532,45 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* XXX replace w/ memcpy */
for (i = 0; i < l; i++) {
- cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
+ uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
+ cpu_stb_primary_ra(env, a1 + i, x, ra);
}
return cc;
}
/* invalidate pte */
-void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
+void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
+ uint32_t m4)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t page = vaddr & TARGET_PAGE_MASK;
- uint64_t pte = 0;
+ uint64_t pte_addr, pte;
- /* XXX broadcast to other CPUs */
+ /* Compute the page table entry address */
+ pte_addr = (pto & _SEGMENT_ENTRY_ORIGIN);
+ pte_addr += (vaddr & VADDR_PX) >> 9;
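+ /* VADDR_PX selects the eight page-index bits of the address; the
+ shift by 9 scales that index by the 8-byte size of a page-table
+ entry, i.e. (vaddr >> 12) << 3. */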
- /* XXX Linux is nice enough to give us the exact pte address.
- According to spec we'd have to find it out ourselves */
- /* XXX Linux is fine with overwriting the pte, the spec requires
- us to only set the invalid bit */
- stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
+ /* Mark the page table entry as invalid */
+ pte = ldq_phys(cs->as, pte_addr);
+ pte |= _PAGE_INVALID;
+ stq_phys(cs->as, pte_addr, pte);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
- tlb_flush_page(cs, page);
+ /* XXX: the LC bit should be considered as 0 if the local-TLB-clearing
+ facility is not installed. */
+ if (m4 & 1) {
+ tlb_flush_page(cs, page);
+ } else {
+ tlb_flush_page_all_cpus_synced(cs, page);
+ }
/* XXX 31-bit hack */
- if (page & 0x80000000) {
- tlb_flush_page(cs, page & ~0x80000000);
+ if (m4 & 1) {
+ tlb_flush_page(cs, page ^ 0x80000000);
} else {
- tlb_flush_page(cs, page | 0x80000000);
+ tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
}
}
@@ -1164,19 +1582,27 @@ void HELPER(ptlb)(CPUS390XState *env)
tlb_flush(CPU(cpu));
}
+/* flush global tlb */
+void HELPER(purge)(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ tlb_flush_all_cpus_synced(CPU(cpu));
+}
+
/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
+ return (uint32_t)ldl_phys(cs->as, wrap_address(env, addr));
}
uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- return ldq_phys(cs->as, get_address(env, 0, 0, addr));
+ return ldq_phys(cs->as, wrap_address(env, addr));
}
/* store using real address */
@@ -1184,7 +1610,7 @@ void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
+ stl_phys(cs->as, wrap_address(env, addr), (uint32_t)v1);
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&
@@ -1199,7 +1625,7 @@ void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
- stq_phys(cs->as, get_address(env, 0, 0, addr), v1);
+ stq_phys(cs->as, wrap_address(env, addr), v1);
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&
@@ -1215,17 +1641,17 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
uint32_t cc = 0;
- int old_exc = cs->exception_index;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t ret;
- int flags;
+ int old_exc, flags;
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+ cpu_restore_state(cs, GETPC());
program_interrupt(env, PGM_SPECIAL_OP, 2);
}
- cs->exception_index = old_exc;
+ old_exc = cs->exception_index;
if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
cc = 3;
}
@@ -1240,3 +1666,126 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
return ret;
}
#endif
+
+/* load pair from quadword */
+uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t hi, lo;
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
+ hi = int128_gethi(v);
+ lo = int128_getlo(v);
+#endif
+ } else {
+ check_alignment(env, addr, 16, ra);
+
+ hi = cpu_ldq_data_ra(env, addr + 0, ra);
+ lo = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+
+ env->retxl = lo;
+ return hi;
+}
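+
+/* Note: helpers that produce a 128-bit result return the high half
+ directly and stash the low half in env->retxl, which the translator
+ picks up with return_low128(). */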
+
+/* store pair to quadword */
+void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ uintptr_t ra = GETPC();
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+
+ Int128 v = int128_make128(low, high);
+ helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+#endif
+ } else {
+ check_alignment(env, addr, 16, ra);
+
+ cpu_stq_data_ra(env, addr + 0, high, ra);
+ cpu_stq_data_ra(env, addr + 8, low, ra);
+ }
+}
+
+/* Execute instruction. This instruction executes an insn modified with
+ the contents of r1. It does not change the executed instruction in memory;
+ it does not change the program counter.
+
+ Perform this by recording the modified instruction in env->ex_value.
+ This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
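+
+ The encoding packs the (possibly modified) instruction bytes
+ left-aligned into the high bits of ex_value and the ilen into the low
+ bits: e.g. executing the 6-byte MVC d2 03 4000 5000 with R1 = 0x42
+ ORs 0x42 into the length byte and records 0xd243400050000000 | 6.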
+*/
+void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
+{
+ uint64_t insn = cpu_lduw_code(env, addr);
+ uint8_t opc = insn >> 8;
+
+ /* Or in the contents of R1[56:63]. */
+ insn |= r1 & 0xff;
+
+ /* Load the rest of the instruction. */
+ insn <<= 48;
+ switch (get_ilen(opc)) {
+ case 2:
+ break;
+ case 4:
+ insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
+ break;
+ case 6:
+ insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* The most common cases can be sped up by avoiding a new TB. */
+ if ((opc & 0xf0) == 0xd0) {
+ typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
+ uint64_t, uintptr_t);
+ static const dx_helper dx[16] = {
+ [0x2] = do_helper_mvc,
+ [0x4] = do_helper_nc,
+ [0x5] = do_helper_clc,
+ [0x6] = do_helper_oc,
+ [0x7] = do_helper_xc,
+ [0xc] = do_helper_tr,
+ [0xd] = do_helper_trt,
+ };
+ dx_helper helper = dx[opc & 0xf];
+
+ if (helper) {
+ uint32_t l = extract64(insn, 48, 8);
+ uint32_t b1 = extract64(insn, 44, 4);
+ uint32_t d1 = extract64(insn, 32, 12);
+ uint32_t b2 = extract64(insn, 28, 4);
+ uint32_t d2 = extract64(insn, 16, 12);
+ /* Base register 0 means no base, per the architecture. */
+ uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
+ uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);
+
+ env->cc_op = helper(env, l, a1, a2, 0);
+ env->psw.addr += ilen;
+ return;
+ }
+ } else if (opc == 0x0a) {
+ env->int_svc_code = extract64(insn, 48, 8);
+ env->int_svc_ilen = ilen;
+ helper_exception(env, EXCP_SVC);
+ g_assert_not_reached();
+ }
+
+ /* Record the insn we want to execute as well as the ilen to use
+ during the execution of the target insn. This will also ensure
+ that ex_value is non-zero, which flags that we are in a state
+ that requires such execution. */
+ env->ex_value = insn | ilen;
+}
diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
index 1b9f448..edcdf17 100644
--- a/target/s390x/misc_helper.c
+++ b/target/s390x/misc_helper.c
@@ -80,8 +80,6 @@ void HELPER(exception)(CPUS390XState *env, uint32_t excp)
cpu_loop_exit(cs);
}
-#ifndef CONFIG_USER_ONLY
-
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
{
S390CPU *cpu = s390_env_get_cpu(env);
@@ -108,6 +106,8 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
}
}
+#ifndef CONFIG_USER_ONLY
+
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index b11a027..501e390 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -108,7 +108,7 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
* Translate real address to absolute (= physical)
* address by taking care of the prefix mapping.
*/
-static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
if (raddr < 0x2000) {
return raddr + env->psa; /* Map the lowcore. */
@@ -143,8 +143,6 @@ static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
return 0;
}
-#define VADDR_PX 0xff000 /* Page index bits */
-
/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t st_entry,
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 4c48c59..95f91d4 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -57,7 +57,9 @@ struct DisasContext {
struct TranslationBlock *tb;
const DisasInsn *insn;
DisasFields *fields;
+ uint64_t ex_value;
uint64_t pc, next_pc;
+ uint32_t ilen;
enum cc_op cc_op;
bool singlestep_enabled;
};
@@ -349,7 +351,7 @@ static void gen_program_exception(DisasContext *s, int code)
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
tcg_temp_free_i32(tmp);
- tmp = tcg_const_i32(s->next_pc - s->pc);
+ tmp = tcg_const_i32(s->ilen);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
tcg_temp_free_i32(tmp);
@@ -608,11 +610,16 @@ static void gen_op_calc_cc(DisasContext *s)
set_cc_static(s);
}
-static int use_goto_tb(DisasContext *s, uint64_t dest)
+static bool use_exit_tb(DisasContext *s)
{
- if (unlikely(s->singlestep_enabled) ||
- (s->tb->cflags & CF_LAST_IO) ||
- (s->tb->flags & FLAG_MASK_PER)) {
+ return (s->singlestep_enabled ||
+ (s->tb->cflags & CF_LAST_IO) ||
+ (s->tb->flags & FLAG_MASK_PER));
+}
+
+static bool use_goto_tb(DisasContext *s, uint64_t dest)
+{
+ if (unlikely(use_exit_tb(s))) {
return false;
}
#ifndef CONFIG_USER_ONLY
@@ -1162,6 +1169,8 @@ typedef enum {
the PC (for whatever reason), so there's no need to do it again on
exiting the TB. */
EXIT_PC_UPDATED,
+ /* We have updated the PC and CC values. */
+ EXIT_PC_CC_UPDATED,
/* We are exiting the TB, but have neither emitted a goto_tb, nor
updated the PC for the next instruction to be executed. */
EXIT_PC_STALE,
@@ -1195,6 +1204,8 @@ typedef enum DisasFacility {
FAC_SFLE, /* store facility list extended */
FAC_ILA, /* interlocked access facility 1 */
FAC_LPP, /* load-program-parameter */
+ FAC_DAT_ENH, /* DAT-enhancement */
+ FAC_E2, /* extended-translation facility 2 */
} DisasFacility;
struct DisasInsn {
@@ -1866,7 +1877,6 @@ static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
int r2 = get_field(s->fields, r2);
TCGv_i64 len = tcg_temp_new_i64();
- potential_page_fault(s);
gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
return_low128(o->out);
@@ -1901,7 +1911,6 @@ static ExitStatus op_clc(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
break;
default:
- potential_page_fault(s);
vl = tcg_const_i32(l);
gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
tcg_temp_free_i32(vl);
@@ -1912,14 +1921,65 @@ static ExitStatus op_clc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r2 = get_field(s->fields, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_clcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
set_cc_static(s);
return NO_EXIT;
}
@@ -1929,7 +1989,6 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t1, o->in1);
- potential_page_fault(s);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
tcg_temp_free_i32(t1);
@@ -1939,7 +1998,6 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
@@ -2006,11 +2064,45 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGMemOp mop = s->insn->data;
+ TCGv_i64 addr, old, cc;
+ TCGLabel *lab = gen_new_label();
+
+ /* Note that in1 = R1 (zero-extended expected value),
+ out = R1 (original reg), out2 = R1+1 (new value). */
+
check_privileged(s);
- gen_helper_csp(cc_op, cpu_env, r1, o->in2);
- tcg_temp_free_i32(r1);
- set_cc_static(s);
+ addr = tcg_temp_new_i64();
+ old = tcg_temp_new_i64();
+ tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
+ tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
+ get_mem_index(s), mop | MO_ALIGN);
+ tcg_temp_free_i64(addr);
+
+ /* Are the memory and expected values (un)equal? */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+
+ /* Write back the output now, so that it happens before the
+ following branch, so that we don't need local temps. */
+ if ((mop & MO_SIZE) == MO_32) {
+ tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
+ } else {
+ tcg_gen_mov_i64(o->out, old);
+ }
+ tcg_temp_free_i64(old);
+
+ /* If the comparison was equal, and the LSB of R2 was set,
+ then we need to flush the TLB (for all cpus). */
+ tcg_gen_xori_i64(cc, cc, 1);
+ tcg_gen_and_i64(cc, cc, o->in2);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
+ tcg_temp_free_i64(cc);
+
+ gen_helper_purge(cpu_env);
+ gen_set_label(lab);
+
return NO_EXIT;
}
#endif
@@ -2153,27 +2245,34 @@ static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
- /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
- tb->flags, (ab)use the tb->cs_base field as the address of
- the template in memory, and grab 8 bits of tb->flags/cflags for
- the contents of the register. We would then recognize all this
- in gen_intermediate_code_internal, generating code for exactly
- one instruction. This new TB then gets executed normally.
-
- On the other hand, this seems to be mostly used for modifying
- MVC inside of memcpy, which needs a helper call anyway. So
- perhaps this doesn't bear thinking about any further. */
+ int r1 = get_field(s->fields, r1);
+ TCGv_i32 ilen;
+ TCGv_i64 v1;
- TCGv_i64 tmp;
+ /* Nested EXECUTE is not allowed. */
+ if (unlikely(s->ex_value)) {
+ gen_program_exception(s, PGM_EXECUTE);
+ return EXIT_NORETURN;
+ }
update_psw_addr(s);
- gen_op_calc_cc(s);
+ update_cc_op(s);
- tmp = tcg_const_i64(s->next_pc);
- gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
- tcg_temp_free_i64(tmp);
+ if (r1 == 0) {
+ v1 = tcg_const_i64(0);
+ } else {
+ v1 = regs[r1];
+ }
- return NO_EXIT;
+ ilen = tcg_const_i32(s->ilen);
+ gen_helper_ex(cpu_env, ilen, v1, o->in2);
+ tcg_temp_free_i32(ilen);
+
+ if (r1 == 0) {
+ tcg_temp_free_i64(v1);
+ }
+
+ return EXIT_PC_CC_UPDATED;
}
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
@@ -2311,8 +2410,12 @@ static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
+ TCGv_i32 m4;
+
check_privileged(s);
- gen_helper_ipte(cpu_env, o->in1, o->in2);
+ m4 = tcg_const_i32(get_field(s->fields, m4));
+ gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
+ tcg_temp_free_i32(m4);
return NO_EXIT;
}
@@ -2324,6 +2427,27 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o)
}
#endif
+static ExitStatus op_keb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
/* The real output is indeed the original value in memory;
@@ -2545,7 +2669,6 @@ static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_lctl(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2557,7 +2680,6 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_lctlg(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2567,7 +2689,6 @@ static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_lra(o->out, cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -2624,7 +2745,6 @@ static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
gen_helper_lam(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -2789,6 +2909,13 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lpq(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
#ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
@@ -2866,32 +2993,78 @@ static ExitStatus op_movx(DisasContext *s, DisasOps *o)
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
return NO_EXIT;
}
+static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
- potential_page_fault(s);
- gen_helper_mvcl(cc_op, cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
+ int r1 = get_field(s->fields, r1);
+ int r2 = get_field(s->fields, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_mvcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
- gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
set_cc_static(s);
return NO_EXIT;
}
@@ -2901,7 +3074,6 @@ static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s->fields, l1);
check_privileged(s);
- potential_page_fault(s);
gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -2911,30 +3083,51 @@ static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s->fields, l1);
check_privileged(s);
- potential_page_fault(s);
gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
#endif
+static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
- gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
+ gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
return NO_EXIT;
}
+static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
tcg_gen_mul_i64(o->out, o->in1, o->in2);
@@ -3043,7 +3236,6 @@ static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -3078,7 +3270,6 @@ static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -3107,6 +3298,46 @@ static ExitStatus op_ori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_pack(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ gen_helper_pack(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_pka(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s->fields, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l2 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pka(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_pku(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s->fields, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and should not exceed 64 bytes. */
+ if ((l2 & 1) || (l2 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pku(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
gen_helper_popcnt(o->out, o->in2);
@@ -3627,7 +3858,6 @@ static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_stctg(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3639,7 +3869,6 @@ static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
check_privileged(s);
- potential_page_fault(s);
gen_helper_stctl(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3871,7 +4100,6 @@ static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
- potential_page_fault(s);
gen_helper_stam(cpu_env, r1, o->in2, r3);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
@@ -3975,9 +4203,14 @@ static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
+ return NO_EXIT;
+}
+
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
set_cc_static(s);
return_low128(o->in2);
@@ -4027,7 +4260,7 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
tcg_temp_free_i32(t);
- t = tcg_const_i32(s->next_pc - s->pc);
+ t = tcg_const_i32(s->ilen);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
tcg_temp_free_i32(t);
@@ -4035,6 +4268,16 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)
return EXIT_NORETURN;
}
+static ExitStatus op_tam(DisasContext *s, DisasOps *o)
+{
+ int cc = 0;
+
+ cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
+ cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
+ gen_op_movi_cc(s, cc);
+ return NO_EXIT;
+}
+
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
@@ -4057,19 +4300,36 @@ static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
}
#ifndef CONFIG_USER_ONLY
+
+static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_testblock(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_tprot(cc_op, o->addr1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
+
#endif
+static ExitStatus op_tp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
+ gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
+ tcg_temp_free_i32(l1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_tr(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
@@ -4078,7 +4338,6 @@ static ExitStatus op_tr(DisasContext *s, DisasOps *o)
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
- potential_page_fault(s);
gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
return_low128(o->out2);
set_cc_static(s);
@@ -4088,22 +4347,95 @@ static ExitStatus op_tre(DisasContext *s, DisasOps *o)
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
set_cc_static(s);
return NO_EXIT;
}
+static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
+ TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
+ TCGv_i32 tst = tcg_temp_new_i32();
+ int m3 = get_field(s->fields, m3);
+
+ /* XXX: the C bit in M3 should be considered as 0 when the
+ ETF2-enhancement facility is not installed. */
+ if (m3 & 1) {
+ tcg_gen_movi_i32(tst, -1);
+ } else {
+ tcg_gen_extrl_i64_i32(tst, regs[0]);
+ if (s->insn->opc & 3) {
+ tcg_gen_ext8u_i32(tst, tst);
+ } else {
+ tcg_gen_ext16u_i32(tst, tst);
+ }
+ }
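+
+ /* sizes: bit 0 set means a one-byte destination, bit 1 set means a
+ one-byte source; HELPER(trXX) decodes the same mask. */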
+ gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
+
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ tcg_temp_free_i32(sizes);
+ tcg_temp_free_i32(tst);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ts(DisasContext *s, DisasOps *o)
+{
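+ /* TEST AND SET: atomically exchange the byte with all ones; the CC
+ is the former leftmost bit of that byte. */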
+ TCGv_i32 t1 = tcg_const_i32(0xff);
+ tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
+ tcg_gen_extract_i32(cc_op, t1, 7, 1);
+ tcg_temp_free_i32(t1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
- potential_page_fault(s);
gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
tcg_temp_free_i32(l);
return NO_EXIT;
}
+static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s->fields, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l1 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s->fields, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and should not exceed 64 bytes. */
+ if ((l1 & 1) || (l1 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
int d1 = get_field(s->fields, d1);
@@ -4151,7 +4483,6 @@ static ExitStatus op_xc(DisasContext *s, DisasOps *o)
/* But in general we'll defer to a helper. */
o->in2 = get_address(s, 0, b2, d2);
t32 = tcg_const_i32(l);
- potential_page_fault(s);
gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
tcg_temp_free_i32(t32);
set_cc_static(s);
@@ -5158,24 +5489,36 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
int op, op2, ilen;
const DisasInsn *info;
- insn = ld_code2(env, pc);
- op = (insn >> 8) & 0xff;
- ilen = get_ilen(op);
- s->next_pc = s->pc + ilen;
+ if (unlikely(s->ex_value)) {
+ /* Drop the EX data now, so that it's clear on exception paths. */
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
+ tcg_temp_free_i64(zero);
- switch (ilen) {
- case 2:
- insn = insn << 48;
- break;
- case 4:
- insn = ld_code4(env, pc) << 32;
- break;
- case 6:
- insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
- break;
- default:
- abort();
+ /* Extract the values saved by EXECUTE. */
+ insn = s->ex_value & 0xffffffffffff0000ull;
+ ilen = s->ex_value & 0xf;
+ op = insn >> 56;
+ } else {
+ insn = ld_code2(env, pc);
+ op = (insn >> 8) & 0xff;
+ ilen = get_ilen(op);
+ switch (ilen) {
+ case 2:
+ insn = insn << 48;
+ break;
+ case 4:
+ insn = ld_code4(env, pc) << 32;
+ break;
+ case 6:
+ insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
+ s->next_pc = s->pc + ilen;
+ s->ilen = ilen;
/* We can't actually determine the insn format until we've looked up
the full insn opcode. Which we can't do without locating the
@@ -5392,6 +5735,7 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
dc.tb = tb;
dc.pc = pc_start;
dc.cc_op = CC_OP_DYNAMIC;
+ dc.ex_value = tb->cs_base;
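+ /* A non-zero cs_base carries the insn recorded by HELPER(ex);
+ see extract_insn above. */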
do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
@@ -5426,10 +5770,7 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
gen_io_start();
}
- status = NO_EXIT;
- if (status == NO_EXIT) {
- status = translate_one(env, &dc);
- }
+ status = translate_one(env, &dc);
/* If we reach a page boundary, are single stepping,
or exhaust instruction count, stop generation. */
@@ -5438,7 +5779,8 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
|| tcg_op_buf_full()
|| num_insns >= max_insns
|| singlestep
- || cs->singlestep_enabled)) {
+ || cs->singlestep_enabled
+ || dc.ex_value)) {
status = EXIT_PC_STALE;
}
} while (status == NO_EXIT);
@@ -5458,11 +5800,15 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
/* Next TB starts off with CC_OP_DYNAMIC, so make sure the
cc op type is in env */
update_cc_op(&dc);
+ /* FALLTHRU */
+ case EXIT_PC_CC_UPDATED:
/* Exit the TB, either by raising a debug exception or by return. */
if (do_debug) {
gen_exception(EXCP_DEBUG);
- } else {
+ } else if (use_exit_tb(&dc)) {
tcg_gen_exit_tb(0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr(psw_addr);
}
break;
default:
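The exit path now distinguishes two cases: if control must bounce back
through the main execution loop, the TB ends with tcg_gen_exit_tb(0);
otherwise tcg_gen_lookup_and_goto_ptr lets it chain straight to the next
TB via a hash lookup on the current PSW address. One plausible shape of
such a predicate, purely as illustration (the conditions the real
use_exit_tb tests may differ):

    #include <stdbool.h>
    #include <stdint.h>

    #define CF_LAST_IO 0x8000   /* illustrative flag value only */

    /* Cut-down stand-ins for the translator state, not QEMU types. */
    struct tb { uint32_t cflags; };
    struct dc { bool singlestep_enabled; uint64_t ex_value; struct tb *tb; };

    /* Chaining via a looked-up host pointer is only safe when nothing
       forces a return to the main loop after this TB. */
    static bool use_exit_tb_sketch(const struct dc *s)
    {
        return s->singlestep_enabled        /* debugger sees each step */
            || (s->tb->cflags & CF_LAST_IO) /* icount I/O bookkeeping */
            || s->ex_value;                 /* one-shot EXECUTE TB */
    }

    int main(void)
    {
        struct tb t = { 0 };
        struct dc s = { false, 0, &t };
        return use_exit_tb_sketch(&s) ? 1 : 0;  /* 0: free to chain */
    }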
@@ -5478,9 +5824,14 @@ void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
qemu_log_lock();
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
- qemu_log("\n");
+ if (unlikely(dc.ex_value)) {
+ /* ??? Unfortunately log_target_disas can't use host memory. */
+ qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
+ } else {
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
+ qemu_log("\n");
+ }
qemu_log_unlock();
}
#endif
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 6c07c6b..ffb9168 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -91,8 +91,10 @@
#define FPSCR_RM_NEAREST (0 << 0)
#define FPSCR_RM_ZERO (1 << 0)
+#define DELAY_SLOT_MASK 0x7
#define DELAY_SLOT (1 << 0)
#define DELAY_SLOT_CONDITIONAL (1 << 1)
+#define DELAY_SLOT_RTE (1 << 2)
typedef struct tlb_t {
uint32_t vpn; /* virtual page number */
@@ -263,7 +265,13 @@ void cpu_load_tlb(CPUSH4State * env);
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
{
- return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+    /* The instruction in an RTE delay slot is fetched in privileged
+ mode, but executed in user mode. */
+ if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
+ return 0;
+ } else {
+ return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+ }
}
#include "exec/cpu-all.h"
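The rule above is easy to exercise in isolation: once RTE has restored a
user-mode SR, the fetch of its delay-slot instruction still uses the
privileged MMU index while data accesses already use the user index. A
minimal stand-alone model (the struct is a mock of the two CPUSH4State
fields involved, not the QEMU definition):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SR_MD 30
    #define DELAY_SLOT_RTE (1 << 2)

    struct sh4_env { uint32_t sr; uint32_t flags; };

    static int mmu_index(const struct sh4_env *env, bool ifetch)
    {
        if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
            return 0;                                   /* kernel */
        }
        return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;  /* 1 = user */
    }

    int main(void)
    {
        /* RTE has cleared SR.MD for the slot's execution ... */
        struct sh4_env env = { .sr = 0, .flags = DELAY_SLOT_RTE };
        assert(mmu_index(&env, true) == 0);   /* ... fetch: privileged */
        assert(mmu_index(&env, false) == 1);  /* data access: user */
        return 0;
    }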
@@ -380,7 +388,7 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
{
*pc = env->pc;
*cs_base = 0;
- *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) /* Bits 0-1 */
+    *flags = (env->flags & DELAY_SLOT_MASK) /* Bits 0-2 */
| (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
| (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
| (env->sr & (1u << SR_FD)) /* Bit 15 */
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 8f8ce81..28d93c2 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -21,6 +21,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/log.h"
+#include "sysemu/sysemu.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/sh4/sh_intc.h"
@@ -92,7 +93,14 @@ void superh_cpu_do_interrupt(CPUState *cs)
if (env->sr & (1u << SR_BL)) {
if (do_exp && cs->exception_index != 0x1e0) {
- cs->exception_index = 0x000; /* masked exception -> reset */
+ /* In theory a masked exception generates a reset exception,
+           which in turn jumps to the reset vector. However, this only
+ works when using a bootloader. When using a kernel and an
+ initrd, they need to be reloaded and the program counter
+ should be loaded with the kernel entry point.
+ qemu_system_reset_request takes care of that. */
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ return;
}
if (do_irq && !env->in_sleep) {
return; /* masked */
@@ -164,11 +172,11 @@ void superh_cpu_do_interrupt(CPUState *cs)
env->sgr = env->gregs[15];
env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
- if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ if (env->flags & DELAY_SLOT_MASK) {
/* Branch instruction should be executed again before delay slot. */
env->spc -= 2;
/* Clear flags for exception/interrupt routine. */
- env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ env->flags &= ~DELAY_SLOT_MASK;
}
if (do_exp) {
@@ -420,7 +428,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
if (!(env->sr & (1u << SR_MD))
&& (address < 0xe0000000 || address >= 0xe4000000)) {
/* Unauthorized access in user mode (only store queues are available) */
- fprintf(stderr, "Unauthorized access\n");
+ qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
if (rw == 0)
return MMU_DADDR_ERROR_READ;
else if (rw == 1)
@@ -863,8 +871,16 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
- superh_cpu_do_interrupt(cs);
- return true;
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ /* Delay slots are indivisible, ignore interrupts */
+ if (env->flags & DELAY_SLOT_MASK) {
+ return false;
+ } else {
+ superh_cpu_do_interrupt(cs);
+ return true;
+ }
}
return false;
}
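Deferring here keeps the branch and its slot atomic; the interrupt is
simply taken on a later call, once the flags have cleared. The acceptance
rule in isolation (flag values copied from the cpu.h hunk above):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DELAY_SLOT             (1 << 0)
    #define DELAY_SLOT_CONDITIONAL (1 << 1)
    #define DELAY_SLOT_RTE         (1 << 2)
    #define DELAY_SLOT_MASK        0x7

    /* A pending hard interrupt is only accepted outside delay slots. */
    static bool accept_hard_interrupt(uint32_t env_flags)
    {
        return (env_flags & DELAY_SLOT_MASK) == 0;
    }

    int main(void)
    {
        assert(accept_hard_interrupt(0));
        assert(!accept_hard_interrupt(DELAY_SLOT));
        assert(!accept_hard_interrupt(DELAY_SLOT_RTE));
        return 0;
    }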
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 0bc2f9f..8bc132b 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -185,6 +185,9 @@ void superh_cpu_dump_state(CPUState *cs, FILE *f,
} else if (env->flags & DELAY_SLOT_CONDITIONAL) {
cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
env->delayed_pc);
+ } else if (env->flags & DELAY_SLOT_RTE) {
+ cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
}
}
@@ -217,8 +220,7 @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
if (ctx->delayed_pc != (uint32_t) -1) {
tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
}
- if ((ctx->tbflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
- != ctx->envflags) {
+ if ((ctx->tbflags & DELAY_SLOT_MASK) != ctx->envflags) {
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
}
}
@@ -329,7 +331,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
#define CHECK_NOT_DELAY_SLOT \
- if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+ if (ctx->envflags & DELAY_SLOT_MASK) { \
gen_save_cpu_state(ctx, true); \
gen_helper_raise_slot_illegal_instruction(cpu_env); \
ctx->bstate = BS_EXCP; \
@@ -339,7 +341,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
#define CHECK_PRIVILEGED \
if (IS_USER(ctx)) { \
gen_save_cpu_state(ctx, true); \
- if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+ if (ctx->envflags & DELAY_SLOT_MASK) { \
gen_helper_raise_slot_illegal_instruction(cpu_env); \
} else { \
gen_helper_raise_illegal_instruction(cpu_env); \
@@ -351,7 +353,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
#define CHECK_FPU_ENABLED \
if (ctx->tbflags & (1u << SR_FD)) { \
gen_save_cpu_state(ctx, true); \
- if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+ if (ctx->envflags & DELAY_SLOT_MASK) { \
gen_helper_raise_slot_fpu_disable(cpu_env); \
} else { \
gen_helper_raise_fpu_disable(cpu_env); \
@@ -428,8 +430,9 @@ static void _decode_opc(DisasContext * ctx)
CHECK_NOT_DELAY_SLOT
gen_write_sr(cpu_ssr);
tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
- ctx->envflags |= DELAY_SLOT;
+ ctx->envflags |= DELAY_SLOT_RTE;
ctx->delayed_pc = (uint32_t) - 1;
+ ctx->bstate = BS_STOP;
return;
case 0x0058: /* sets */
tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
@@ -1784,7 +1787,7 @@ static void _decode_opc(DisasContext * ctx)
fflush(stderr);
#endif
gen_save_cpu_state(ctx, true);
- if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ if (ctx->envflags & DELAY_SLOT_MASK) {
gen_helper_raise_slot_illegal_instruction(cpu_env);
} else {
gen_helper_raise_illegal_instruction(cpu_env);
@@ -1798,14 +1801,14 @@ static void decode_opc(DisasContext * ctx)
_decode_opc(ctx);
- if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ if (old_flags & DELAY_SLOT_MASK) {
/* go out of the delay slot */
- ctx->envflags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ ctx->envflags &= ~DELAY_SLOT_MASK;
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
ctx->bstate = BS_BRANCH;
if (old_flags & DELAY_SLOT_CONDITIONAL) {
gen_delayed_conditional_jump(ctx);
- } else if (old_flags & DELAY_SLOT) {
+ } else {
gen_jump(ctx);
}
@@ -1824,7 +1827,7 @@ void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
pc_start = tb->pc;
ctx.pc = pc_start;
ctx.tbflags = (uint32_t)tb->flags;
- ctx.envflags = tb->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ ctx.envflags = tb->flags & DELAY_SLOT_MASK;
ctx.bstate = BS_NONE;
ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
/* We don't know if the delayed pc came from a dynamic or static branch,
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index d90e38e..4532639 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -23,7 +23,6 @@
#include "cpu.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
-#include "migration/vmstate.h"
#include "linux-user/syscall_defs.h"
#include "exec/exec-all.h"
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index ecca17d..ee29fb1 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -483,6 +483,7 @@ void xtensa_translate_init(void);
void xtensa_breakpoint_handler(CPUState *cs);
void xtensa_finalize_config(XtensaConfig *config);
void xtensa_register_core(XtensaConfigList *node);
+void xtensa_sim_open_console(Chardev *chr);
void check_interrupts(CPUXtensaState *s);
void xtensa_irq_init(CPUXtensaState *env);
void *xtensa_get_extint(CPUXtensaState *env, unsigned extint);
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
index fa5469a..da131ae 100644
--- a/target/xtensa/gdbstub.c
+++ b/target/xtensa/gdbstub.c
@@ -58,7 +58,10 @@ int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
case 8:
return gdb_get_reg64(mem_buf, float64_val(env->fregs[i].f64));
default:
- return 0;
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ memset(mem_buf, 0, reg->size);
+ return reg->size;
}
case 8: /*a*/
@@ -67,6 +70,7 @@ int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
default:
qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported type %d\n",
__func__, n, reg->type);
-        return 0;
+        memset(mem_buf, 0, reg->size);
+        return reg->size;
}
}
@@ -111,7 +115,9 @@ int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->fregs[reg->targno & 0x0f].f64 = make_float64(tmp);
return 8;
default:
- return 0;
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ return reg->size;
}
case 8: /*a*/
@@ -121,7 +127,7 @@ int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
default:
qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported type %d\n",
__func__, n, reg->type);
- return 0;
+ return reg->size;
}
return 4;
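The common thread in these gdbstub hunks: gdb's register packets are
position-encoded, so returning 0 for one unsupported register would
desynchronize every register that follows it in the 'g' packet.
Reporting reg->size (with a zero-filled buffer on reads) keeps the
packet aligned. A minimal model of the read side (the struct is a
stand-in, not the QEMU type):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct reg { int size; };   /* only the size matters here */

    static int read_unsupported_reg(const struct reg *r, uint8_t *mem_buf)
    {
        memset(mem_buf, 0, r->size);  /* well-defined, zero-filled */
        return r->size;               /* not 0: keeps packet aligned */
    }

    int main(void)
    {
        uint8_t buf[16];
        struct reg r = { .size = 8 };
        assert(read_unsupported_reg(&r, buf) == 8);
        assert(buf[0] == 0 && buf[7] == 0);
        return 0;
    }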
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
index a888a9d..32e2bd7 100644
--- a/target/xtensa/xtensa-semi.c
+++ b/target/xtensa/xtensa-semi.c
@@ -27,9 +27,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
+#include "chardev/char-fe.h"
#include "exec/helper-proto.h"
#include "exec/semihost.h"
+#include "qapi/error.h"
#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+
+static CharBackend *xtensa_sim_console;
enum {
TARGET_SYS_exit = 1,
@@ -148,6 +153,15 @@ static uint32_t errno_h2g(int host_errno)
}
}
+void xtensa_sim_open_console(Chardev *chr)
+{
+ static CharBackend console;
+
+ qemu_chr_fe_init(&console, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&console, NULL, NULL, NULL, NULL, NULL, true);
+ xtensa_sim_console = &console;
+}
+
void HELPER(simcall)(CPUXtensaState *env)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
@@ -166,6 +180,7 @@ void HELPER(simcall)(CPUXtensaState *env)
uint32_t fd = regs[3];
uint32_t vaddr = regs[4];
uint32_t len = regs[5];
+ uint32_t len_done = 0;
while (len > 0) {
hwaddr paddr = cpu_get_phys_page_debug(cs, vaddr);
@@ -173,25 +188,54 @@ void HELPER(simcall)(CPUXtensaState *env)
TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
uint32_t io_sz = page_left < len ? page_left : len;
hwaddr sz = io_sz;
- void *buf = cpu_physical_memory_map(paddr, &sz, is_write);
+ void *buf = cpu_physical_memory_map(paddr, &sz, !is_write);
+ uint32_t io_done;
+ bool error = false;
if (buf) {
vaddr += io_sz;
len -= io_sz;
- regs[2] = is_write ?
- write(fd, buf, io_sz) :
- read(fd, buf, io_sz);
- regs[3] = errno_h2g(errno);
- cpu_physical_memory_unmap(buf, sz, is_write, sz);
- if (regs[2] == -1) {
- break;
+ if (fd < 3 && xtensa_sim_console) {
+ if (is_write && (fd == 1 || fd == 2)) {
+ io_done = qemu_chr_fe_write_all(xtensa_sim_console,
+ buf, io_sz);
+ regs[3] = errno_h2g(errno);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s fd %d is not supported with chardev console\n",
+ is_write ?
+ "writing to" : "reading from", fd);
+ io_done = -1;
+ regs[3] = TARGET_EBADF;
+ }
+ } else {
+ io_done = is_write ?
+ write(fd, buf, io_sz) :
+ read(fd, buf, io_sz);
+ regs[3] = errno_h2g(errno);
}
+ if (io_done == -1) {
+ error = true;
+ io_done = 0;
+ }
+ cpu_physical_memory_unmap(buf, sz, !is_write, io_done);
} else {
- regs[2] = -1;
+ error = true;
regs[3] = TARGET_EINVAL;
break;
}
+ if (error) {
+ if (!len_done) {
+ len_done = -1;
+ }
+ break;
+ }
+ len_done += io_done;
+ if (io_done < io_sz) {
+ break;
+ }
}
+ regs[2] = len_done;
}
break;
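The rewritten loop replaces the old per-chunk overwrite of regs[2] with
cumulative accounting: an error before any progress reports -1, while an
error or short transfer after some bytes have moved reports the running
total, matching read(2)/write(2) semantics across the page-sized chunks.
The accounting in isolation (chunk_results stands in for the per-chunk
map-and-transfer of the real code):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t chunked_io(const int32_t *chunk_results, int nchunks,
                              uint32_t chunk_sz)
    {
        uint32_t len_done = 0;
        for (int i = 0; i < nchunks; i++) {
            int32_t io_done = chunk_results[i];  /* bytes moved, or -1 */
            if (io_done == -1) {
                return len_done ? (int32_t)len_done : -1;
            }
            len_done += io_done;
            if ((uint32_t)io_done < chunk_sz) {
                break;          /* short transfer: stop, report progress */
            }
        }
        return len_done;
    }

    int main(void)
    {
        int32_t ok[] = { 4096, 4096, 100 };
        int32_t early_fail[] = { -1 };
        int32_t late_fail[] = { 4096, -1 };

        printf("%d\n", chunked_io(ok, 3, 4096));         /* 8292 */
        printf("%d\n", chunked_io(early_fail, 1, 4096)); /* -1 */
        printf("%d\n", chunked_io(late_fail, 2, 4096));  /* 4096 */
        return 0;
    }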
@@ -241,10 +285,6 @@ void HELPER(simcall)(CPUXtensaState *env)
uint32_t target_tvv[2];
struct timeval tv = {0};
- fd_set fdset;
-
- FD_ZERO(&fdset);
- FD_SET(fd, &fdset);
if (target_tv) {
cpu_memory_rw_debug(cs, target_tv,
@@ -252,12 +292,25 @@ void HELPER(simcall)(CPUXtensaState *env)
tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
}
- regs[2] = select(fd + 1,
- rq == SELECT_ONE_READ ? &fdset : NULL,
- rq == SELECT_ONE_WRITE ? &fdset : NULL,
- rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
- target_tv ? &tv : NULL);
- regs[3] = errno_h2g(errno);
+ if (fd < 3 && xtensa_sim_console) {
+ if ((fd == 1 || fd == 2) && rq == SELECT_ONE_WRITE) {
+ regs[2] = 1;
+ } else {
+ regs[2] = 0;
+ }
+ regs[3] = 0;
+ } else {
+ fd_set fdset;
+
+ FD_ZERO(&fdset);
+ FD_SET(fd, &fdset);
+ regs[2] = select(fd + 1,
+ rq == SELECT_ONE_READ ? &fdset : NULL,
+ rq == SELECT_ONE_WRITE ? &fdset : NULL,
+ rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
+ target_tv ? &tv : NULL);
+ regs[3] = errno_h2g(errno);
+ }
}
break;