Diffstat (limited to 'target')
-rw-r--r--   target/alpha/cpu.h                       |   2
-rw-r--r--   target/alpha/translate.c                 |   2
-rw-r--r--   target/arm/translate-a64.c               |   4
-rw-r--r--   target/arm/translate.c                   |   7
-rw-r--r--   target/cris/translate.c                  |   2
-rw-r--r--   target/hppa/cpu.h                        |   1
-rw-r--r--   target/hppa/translate.c                  |   1
-rw-r--r--   target/i386/cpu.c                        |  43
-rw-r--r--   target/i386/cpu.h                        |   8
-rw-r--r--   target/i386/kvm.c                        | 205
-rw-r--r--   target/i386/machine.c                    |  20
-rw-r--r--   target/i386/ops_sse.h                    |  88
-rw-r--r--   target/i386/translate.c                  |  10
-rw-r--r--   target/lm32/translate.c                  |   9
-rw-r--r--   target/microblaze/translate.c            |   2
-rw-r--r--   target/mips/cpu.h                        |   2
-rw-r--r--   target/mips/translate.c                  |  11
-rw-r--r--   target/nios2/translate.c                 |   4
-rw-r--r--   target/ppc/translate.c                   |  13
-rw-r--r--   target/ppc/translate_init.inc.c          |   2
-rw-r--r--   target/riscv/insn_trans/trans_rvi.inc.c  |   1
-rw-r--r--   target/sh4/cpu.h                         |   2
-rw-r--r--   target/sparc/cpu.h                       |   2
-rw-r--r--   target/sparc/translate.c                 |  16
-rw-r--r--   target/unicore32/translate.c             |   1
-rw-r--r--   target/xtensa/cpu.h                      |   2
-rw-r--r--   target/xtensa/translate.c                |  15
27 files changed, 239 insertions, 236 deletions
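A recurring change in the hunks below is the removal of gen_io_end() after helpers that touch I/O or clock state under icount: gen_io_start() stays, and the translator simply ends the translation block afterwards (DISAS_EXIT, DISAS_UPDATE, gen_stop_exception(), and so on). A minimal sketch of the resulting shape of such a sequence, taken from the MIPS CP0 Count read as it appears in the target/mips hunks below (translator-internal symbols, only meaningful inside QEMU's TCG front end, not a standalone program):

    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();                   /* account the I/O access for icount */
    }
    gen_helper_mfc0_count(arg, cpu_env);  /* read CP0 Count via a helper */
    /*
     * Break the TB so a timer interrupt raised by the read is taken
     * immediately; with this series no gen_io_end() call is needed.
     */
    ctx->base.is_jmp = DISAS_EXIT;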
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 4619530..a530249 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -23,8 +23,6 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" -#define ALIGNED_ONLY - /* Alpha processors have a weak memory model */ #define TCG_GUEST_DEFAULT_MO (0) diff --git a/target/alpha/translate.c b/target/alpha/translate.c index 2c9cccf..1e29653 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -1332,7 +1332,6 @@ static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno) if (use_icount) { gen_io_start(); helper(va); - gen_io_end(); return DISAS_PC_STALE; } else { helper(va); @@ -2398,7 +2397,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(); gen_helper_load_pcc(va, cpu_env); - gen_io_end(); ret = DISAS_PC_STALE; } else { gen_helper_load_pcc(va, cpu_env); diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index fc3e5f5..6fd0b77 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -1775,7 +1775,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { /* I/O operations must end the TB here (whether read or write) */ - gen_io_end(); s->base.is_jmp = DISAS_UPDATE; } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { /* We default to ending the TB on a coprocessor register write, @@ -2084,9 +2083,6 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) gen_helper_exception_return(cpu_env, dst); tcg_temp_free_i64(dst); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return; diff --git a/target/arm/translate.c b/target/arm/translate.c index d948757..cbe19b7 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -3213,9 +3213,6 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, cpsr); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } tcg_temp_free_i32(cpsr); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; @@ -7303,7 +7300,6 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn) if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { /* I/O operations must end the TB here (whether read or write) */ - gen_io_end(); gen_lookup_tb(s); } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { /* We default to ending the TB on a coprocessor register write, @@ -9163,9 +9159,6 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, tmp); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } tcg_temp_free_i32(tmp); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; diff --git a/target/cris/translate.c b/target/cris/translate.c index 3429a3b..e752bd0 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -3225,8 +3225,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) npc = dc->pc; - if (tb_cflags(tb) & CF_LAST_IO) - gen_io_end(); /* Force an update if the per-tb cpu state has changed. */ if (dc->is_jmp == DISAS_NEXT && (dc->cpustate_changed || !dc->flagx_known diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 4b816cc..6713d04 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -30,7 +30,6 @@ basis. 
It's probably easier to fall back to a strong memory model. */ #define TCG_GUEST_DEFAULT_MO TCG_MO_ALL -#define ALIGNED_ONLY #define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 3 #define MMU_PHYS_IDX 4 diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 188fe68..8c61895 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -2161,7 +2161,6 @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { gen_io_start(); gen_helper_read_interval_timer(tmp); - gen_io_end(); ctx->base.is_jmp = DISAS_IAQ_N_STALE; } else { gen_helper_read_interval_timer(tmp); diff --git a/target/i386/cpu.c b/target/i386/cpu.c index ff65e11..9e0bac3 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -770,6 +770,7 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, /* CPUID_7_0_ECX_OSPKE is dynamic */ \ CPUID_7_0_ECX_LA57) #define TCG_7_0_EDX_FEATURES 0 +#define TCG_7_1_EAX_FEATURES 0 #define TCG_APM_FEATURES 0 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) @@ -906,7 +907,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", - NULL, NULL, NULL, NULL, + "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "kvmclock-stable-bit", NULL, NULL, NULL, @@ -1095,6 +1096,25 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_0_EDX_FEATURES, }, + [FEAT_7_1_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "avx512-bf16", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_7_1_EAX_FEATURES, + }, [FEAT_8000_0007_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -4292,13 +4312,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 7: /* Structured Extended Feature Flags Enumeration Leaf */ if (count == 0) { - *eax = 0; /* Maximum ECX value for sub-leaves */ + /* Maximum ECX value for sub-leaves */ + *eax = env->cpuid_level_func7; *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ + } else if (count == 1) { + *eax = env->features[FEAT_7_1_EAX]; + *ebx = 0; + *ecx = 0; + *edx = 0; } else { *eax = 0; *ebx = 0; @@ -4948,6 +4974,11 @@ static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); break; } + + if (eax == 7) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, + fi->cpuid.ecx); + } } /* Calculate XSAVE components based on the configured CPU feature flags */ @@ -5066,6 +5097,7 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); + x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); @@ -5097,6 +5129,9 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) } /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ + if (env->cpuid_level_func7 == UINT32_MAX) { + env->cpuid_level_func7 = env->cpuid_min_level_func7; + } if (env->cpuid_level == UINT32_MAX) { env->cpuid_level = env->cpuid_min_level; } @@ -5660,6 +5695,8 @@ static void x86_cpu_initfn(Object *obj) object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort); object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); + object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control", + &error_abort); object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); @@ -5868,6 +5905,8 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), + DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, + UINT32_MAX), DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), diff --git a/target/i386/cpu.h b/target/i386/cpu.h index ecd0ec0..5f6e3a0 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -479,6 +479,7 @@ typedef enum FeatureWord { FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ + FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ @@ -692,6 +693,8 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) /*Core Capability*/ #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */ +#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) /* AVX512 BFloat16 Instruction */ + #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and do not invalidate cache */ #define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */ @@ -1260,6 +1263,7 @@ typedef struct CPUX86State { uint64_t steal_time_msr; uint64_t async_pf_en_msr; uint64_t pv_eoi_en_msr; + uint64_t poll_control_msr; /* Partition-wide HV MSRs, will be updated only on the first vcpu */ uint64_t msr_hv_hypercall; @@ -1322,6 +1326,10 @@ typedef struct CPUX86State { /* Fields after this point are preserved across CPU reset. */ /* processor features (e.g. 
for CPUID insn) */ + /* Minimum cpuid leaf 7 value */ + uint32_t cpuid_level_func7; + /* Actual cpuid leaf 7 value */ + uint32_t cpuid_min_level_func7; /* Minimum level/xlevel/xlevel2, based on CPU model + features */ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 2abc881..8023c67 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -193,6 +193,7 @@ static int kvm_get_tsc(CPUState *cs) return 0; } + memset(&msr_data, 0, sizeof(msr_data)); msr_data.info.nmsrs = 1; msr_data.entries[0].index = MSR_IA32_TSC; env->tsc_valid = !runstate_is_running(); @@ -1500,6 +1501,7 @@ int kvm_arch_init_vcpu(CPUState *cs) c = &cpuid_data.entries[cpuid_i++]; } break; + case 0x7: case 0x14: { uint32_t times; @@ -1512,7 +1514,7 @@ int kvm_arch_init_vcpu(CPUState *cs) for (j = 1; j <= times; ++j) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { fprintf(stderr, "cpuid_data is full, no space for " - "cpuid(eax:0x14,ecx:0x%x)\n", j); + "cpuid(eax:0x%x,ecx:0x%x)\n", i, j); abort(); } c = &cpuid_data.entries[cpuid_i++]; @@ -1709,6 +1711,7 @@ int kvm_arch_init_vcpu(CPUState *cs) if (has_xsave) { env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave)); + memset(env->xsave_buf, 0, sizeof(struct kvm_xsave)); } max_nested_state_len = kvm_max_nested_state_length(); @@ -1785,6 +1788,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu) hyperv_x86_synic_reset(cpu); } + /* enabled by default */ + env->poll_control_msr = 1; } void kvm_arch_do_init_vcpu(X86CPU *cpu) @@ -1840,108 +1845,105 @@ static int kvm_get_supported_feature_msrs(KVMState *s) static int kvm_get_supported_msrs(KVMState *s) { - static int kvm_supported_msrs; int ret = 0; + struct kvm_msr_list msr_list, *kvm_msr_list; - /* first time */ - if (kvm_supported_msrs == 0) { - struct kvm_msr_list msr_list, *kvm_msr_list; + /* + * Obtain MSR list from KVM. These are the MSRs that we must + * save/restore. + */ + msr_list.nmsrs = 0; + ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); + if (ret < 0 && ret != -E2BIG) { + return ret; + } + /* + * Old kernel modules had a bug and could write beyond the provided + * memory. Allocate at least a safe amount of 1K. + */ + kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + + msr_list.nmsrs * + sizeof(msr_list.indices[0]))); - kvm_supported_msrs = -1; + kvm_msr_list->nmsrs = msr_list.nmsrs; + ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); + if (ret >= 0) { + int i; - /* Obtain MSR list from KVM. These are the MSRs that we must - * save/restore */ - msr_list.nmsrs = 0; - ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); - if (ret < 0 && ret != -E2BIG) { - return ret; - } - /* Old kernel modules had a bug and could write beyond the provided - memory. Allocate at least a safe amount of 1K. 
*/ - kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + - msr_list.nmsrs * - sizeof(msr_list.indices[0]))); - - kvm_msr_list->nmsrs = msr_list.nmsrs; - ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); - if (ret >= 0) { - int i; - - for (i = 0; i < kvm_msr_list->nmsrs; i++) { - switch (kvm_msr_list->indices[i]) { - case MSR_STAR: - has_msr_star = true; - break; - case MSR_VM_HSAVE_PA: - has_msr_hsave_pa = true; - break; - case MSR_TSC_AUX: - has_msr_tsc_aux = true; - break; - case MSR_TSC_ADJUST: - has_msr_tsc_adjust = true; - break; - case MSR_IA32_TSCDEADLINE: - has_msr_tsc_deadline = true; - break; - case MSR_IA32_SMBASE: - has_msr_smbase = true; - break; - case MSR_SMI_COUNT: - has_msr_smi_count = true; - break; - case MSR_IA32_MISC_ENABLE: - has_msr_misc_enable = true; - break; - case MSR_IA32_BNDCFGS: - has_msr_bndcfgs = true; - break; - case MSR_IA32_XSS: - has_msr_xss = true; - break; - case HV_X64_MSR_CRASH_CTL: - has_msr_hv_crash = true; - break; - case HV_X64_MSR_RESET: - has_msr_hv_reset = true; - break; - case HV_X64_MSR_VP_INDEX: - has_msr_hv_vpindex = true; - break; - case HV_X64_MSR_VP_RUNTIME: - has_msr_hv_runtime = true; - break; - case HV_X64_MSR_SCONTROL: - has_msr_hv_synic = true; - break; - case HV_X64_MSR_STIMER0_CONFIG: - has_msr_hv_stimer = true; - break; - case HV_X64_MSR_TSC_FREQUENCY: - has_msr_hv_frequencies = true; - break; - case HV_X64_MSR_REENLIGHTENMENT_CONTROL: - has_msr_hv_reenlightenment = true; - break; - case MSR_IA32_SPEC_CTRL: - has_msr_spec_ctrl = true; - break; - case MSR_VIRT_SSBD: - has_msr_virt_ssbd = true; - break; - case MSR_IA32_ARCH_CAPABILITIES: - has_msr_arch_capabs = true; - break; - case MSR_IA32_CORE_CAPABILITY: - has_msr_core_capabs = true; - break; - } + for (i = 0; i < kvm_msr_list->nmsrs; i++) { + switch (kvm_msr_list->indices[i]) { + case MSR_STAR: + has_msr_star = true; + break; + case MSR_VM_HSAVE_PA: + has_msr_hsave_pa = true; + break; + case MSR_TSC_AUX: + has_msr_tsc_aux = true; + break; + case MSR_TSC_ADJUST: + has_msr_tsc_adjust = true; + break; + case MSR_IA32_TSCDEADLINE: + has_msr_tsc_deadline = true; + break; + case MSR_IA32_SMBASE: + has_msr_smbase = true; + break; + case MSR_SMI_COUNT: + has_msr_smi_count = true; + break; + case MSR_IA32_MISC_ENABLE: + has_msr_misc_enable = true; + break; + case MSR_IA32_BNDCFGS: + has_msr_bndcfgs = true; + break; + case MSR_IA32_XSS: + has_msr_xss = true; + break; + case HV_X64_MSR_CRASH_CTL: + has_msr_hv_crash = true; + break; + case HV_X64_MSR_RESET: + has_msr_hv_reset = true; + break; + case HV_X64_MSR_VP_INDEX: + has_msr_hv_vpindex = true; + break; + case HV_X64_MSR_VP_RUNTIME: + has_msr_hv_runtime = true; + break; + case HV_X64_MSR_SCONTROL: + has_msr_hv_synic = true; + break; + case HV_X64_MSR_STIMER0_CONFIG: + has_msr_hv_stimer = true; + break; + case HV_X64_MSR_TSC_FREQUENCY: + has_msr_hv_frequencies = true; + break; + case HV_X64_MSR_REENLIGHTENMENT_CONTROL: + has_msr_hv_reenlightenment = true; + break; + case MSR_IA32_SPEC_CTRL: + has_msr_spec_ctrl = true; + break; + case MSR_VIRT_SSBD: + has_msr_virt_ssbd = true; + break; + case MSR_IA32_ARCH_CAPABILITIES: + has_msr_arch_capabs = true; + break; + case MSR_IA32_CORE_CAPABILITY: + has_msr_core_capabs = true; + break; } } - - g_free(kvm_msr_list); } + g_free(kvm_msr_list); + return ret; } @@ -2493,6 +2495,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); } + + if 
(env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { + kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); + } + if (has_architectural_pmu_version > 0) { if (has_architectural_pmu_version > 1) { /* Stop the counter. */ @@ -2878,6 +2885,9 @@ static int kvm_get_msrs(X86CPU *cpu) if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); } + if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { + kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); + } if (has_architectural_pmu_version > 0) { if (has_architectural_pmu_version > 1) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); @@ -3112,6 +3122,10 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_KVM_STEAL_TIME: env->steal_time_msr = msrs[i].data; break; + case MSR_KVM_POLL_CONTROL: { + env->poll_control_msr = msrs[i].data; + break; + } case MSR_CORE_PERF_FIXED_CTR_CTRL: env->msr_fixed_ctr_ctrl = msrs[i].data; break; @@ -3480,6 +3494,7 @@ static int kvm_put_debugregs(X86CPU *cpu) return 0; } + memset(&dbgregs, 0, sizeof(dbgregs)); for (i = 0; i < 4; i++) { dbgregs.db[i] = env->dr[i]; } diff --git a/target/i386/machine.c b/target/i386/machine.c index ce55755..2767b30 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -437,6 +437,14 @@ static const VMStateDescription vmstate_exception_info = { } }; +/* Poll control MSR enabled by default */ +static bool poll_control_msr_needed(void *opaque) +{ + X86CPU *cpu = opaque; + + return cpu->env.poll_control_msr != 1; +} + static const VMStateDescription vmstate_steal_time_msr = { .name = "cpu/steal_time_msr", .version_id = 1, @@ -470,6 +478,17 @@ static const VMStateDescription vmstate_pv_eoi_msr = { } }; +static const VMStateDescription vmstate_poll_control_msr = { + .name = "cpu/poll_control_msr", + .version_id = 1, + .minimum_version_id = 1, + .needed = poll_control_msr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.poll_control_msr, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + static bool fpop_ip_dp_needed(void *opaque) { X86CPU *cpu = opaque; @@ -1354,6 +1373,7 @@ VMStateDescription vmstate_x86_cpu = { &vmstate_async_pf_msr, &vmstate_pv_eoi_msr, &vmstate_steal_time_msr, + &vmstate_poll_control_msr, &vmstate_fpop_ip_dp, &vmstate_msr_tsc_adjust, &vmstate_msr_tscdeadline, diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index ed05989..ec1ec74 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -710,102 +710,134 @@ void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val) #endif /* float to integer */ + +/* + * x86 mandates that we return the indefinite integer value for the result + * of any float-to-integer conversion that raises the 'invalid' exception. + * Wrap the softfloat functions to get this behaviour. 
+ */ +#define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \ + static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \ + { \ + int oldflags, newflags; \ + RETTYPE r; \ + \ + oldflags = get_float_exception_flags(s); \ + set_float_exception_flags(0, s); \ + r = FN(a, s); \ + newflags = get_float_exception_flags(s); \ + if (newflags & float_flag_invalid) { \ + r = INDEFVALUE; \ + } \ + set_float_exception_flags(newflags | oldflags, s); \ + return r; \ + } + +WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN) +WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN) +WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN) +WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN) +WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN) +WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) +WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) +WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) + void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status); - d->ZMM_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status); - d->ZMM_L(2) = float32_to_int32(s->ZMM_S(2), &env->sse_status); - d->ZMM_L(3) = float32_to_int32(s->ZMM_S(3), &env->sse_status); + d->ZMM_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); + d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); + d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status); + d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status); } void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status); - d->ZMM_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status); + d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); + d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); d->ZMM_Q(1) = 0; } void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status); - d->MMX_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status); + d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); } void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status); - d->MMX_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status); + d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); } int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s) { - return float32_to_int32(s->ZMM_S(0), &env->sse_status); + return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); } int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s) { - return float64_to_int32(s->ZMM_D(0), &env->sse_status); + return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s) { - return float32_to_int64(s->ZMM_S(0), &env->sse_status); + return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status); } int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s) { - return float64_to_int64(s->ZMM_D(0), &env->sse_status); + return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status); } #endif /* float to integer truncated */ void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), 
&env->sse_status); - d->ZMM_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); - d->ZMM_L(2) = float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status); - d->ZMM_L(3) = float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status); + d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); + d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); + d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status); + d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status); } void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); - d->ZMM_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); + d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); + d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); d->ZMM_Q(1) = 0; } void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); - d->MMX_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); + d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); } void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) { - d->MMX_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); - d->MMX_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); + d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); } int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s) { - return float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); + return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); } int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s) { - return float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); + return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); } #ifdef TARGET_X86_64 int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s) { - return float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status); + return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status); } int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) { - return float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); + return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); } #endif diff --git a/target/i386/translate.c b/target/i386/translate.c index 03150a8..5cd74ad 100644 --- a/target/i386/translate.c +++ b/target/i386/translate.c @@ -5381,7 +5381,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, dflag, rm, s->T0); set_cc_op(s, CC_OP_EFLAGS); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -6443,7 +6442,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -6464,7 +6462,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); 
} break; @@ -6482,7 +6479,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, ot, R_EAX, s->T1); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -6502,7 +6498,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); gen_bpt_io(s, s->tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -7206,7 +7201,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_helper_rdtsc(cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -7666,7 +7660,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) } gen_helper_rdtscp(cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; @@ -8036,9 +8029,6 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_v_reg(s, ot, s->T0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), s->T0); - if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } gen_jmp_im(s, s->pc - s->cs_base); gen_eob(s); } else { diff --git a/target/lm32/translate.c b/target/lm32/translate.c index b9f2f2c..778cae1 100644 --- a/target/lm32/translate.c +++ b/target/lm32/translate.c @@ -885,9 +885,6 @@ static void dec_wcsr(DisasContext *dc) } gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]); tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - if (tb_cflags(dc->tb) & CF_USE_ICOUNT) { - gen_io_end(); - } dc->is_jmp = DISAS_UPDATE; break; case CSR_IP: @@ -897,9 +894,6 @@ static void dec_wcsr(DisasContext *dc) } gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]); tcg_gen_movi_tl(cpu_pc, dc->pc + 4); - if (tb_cflags(dc->tb) & CF_USE_ICOUNT) { - gen_io_end(); - } dc->is_jmp = DISAS_UPDATE; break; case CSR_ICC: @@ -1111,9 +1105,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) && (dc->pc - page_start < TARGET_PAGE_SIZE) && num_insns < max_insns); - if (tb_cflags(tb) & CF_LAST_IO) { - gen_io_end(); - } if (unlikely(cs->singlestep_enabled)) { if (dc->is_jmp == DISAS_NEXT) { diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 9ce65f3..95ff663 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -1724,8 +1724,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) npc = dc->jmp_pc; } - if (tb_cflags(tb) & CF_LAST_IO) - gen_io_end(); /* Force an update if the per-tb cpu state has changed. */ if (dc->is_jmp == DISAS_NEXT && (dc->cpustate_changed || org_flags != dc->tb_flags)) { diff --git a/target/mips/cpu.h b/target/mips/cpu.h index d235117..1fd4a18 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -1,8 +1,6 @@ #ifndef MIPS_CPU_H #define MIPS_CPU_H -#define ALIGNED_ONLY - #include "cpu-qom.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" diff --git a/target/mips/translate.c b/target/mips/translate.c index 1c50e5a..8ebde6f 100644 --- a/target/mips/translate.c +++ b/target/mips/translate.c @@ -7129,9 +7129,6 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_io_start(); } gen_helper_mfc0_count(arg, cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } /* * Break the TB to be able to take timer interrupts immediately * after reading count. 
DISAS_STOP isn't sufficient, we need to @@ -8296,7 +8293,6 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) /* For simplicity assume that all writes can cause interrupts. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); /* * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. @@ -8607,9 +8603,6 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_io_start(); } gen_helper_mfc0_count(arg, cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } /* * Break the TB to be able to take timer interrupts immediately * after reading count. DISAS_STOP isn't sufficient, we need to @@ -9748,7 +9741,6 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) /* For simplicity assume that all writes can cause interrupts. */ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); /* * DISAS_STOP isn't sufficient, we need to ensure we break out of * translated code to check for pending interrupts. @@ -12817,9 +12809,6 @@ static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel) gen_io_start(); } gen_helper_rdhwr_cc(t0, cpu_env); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } gen_store_gpr(t0, rt); /* * Break the TB to be able to take timer interrupts immediately diff --git a/target/nios2/translate.c b/target/nios2/translate.c index 17d8f18..e17656e 100644 --- a/target/nios2/translate.c +++ b/target/nios2/translate.c @@ -862,10 +862,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) !tcg_op_buf_full() && num_insns < max_insns); - if (tb_cflags(tb) & CF_LAST_IO) { - gen_io_end(); - } - /* Indicate where the next block should start */ switch (dc->is_jmp) { case DISAS_NEXT: diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 9f9553a..2a9d13f 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -1861,7 +1861,6 @@ static void gen_darn(DisasContext *ctx) gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]); } if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_stop_exception(ctx); } } @@ -3991,9 +3990,6 @@ static void gen_rfi(DisasContext *ctx) gen_update_cfar(ctx, ctx->base.pc_next - 4); gen_helper_rfi(cpu_env); gen_sync_exception(ctx); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } @@ -4011,9 +4007,6 @@ static void gen_rfid(DisasContext *ctx) gen_update_cfar(ctx, ctx->base.pc_next - 4); gen_helper_rfid(cpu_env); gen_sync_exception(ctx); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } @@ -4389,9 +4382,6 @@ static void gen_mtmsrd(DisasContext *ctx) /* Must stop the translation as machine state (may have) changed */ /* Note that mtmsr is not always defined as context-synchronizing */ gen_stop_exception(ctx); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } } #endif /* !defined(CONFIG_USER_ONLY) */ } @@ -4429,9 +4419,6 @@ static void gen_mtmsr(DisasContext *ctx) tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]); #endif gen_helper_store_msr(cpu_env, msr); - if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } tcg_temp_free(msr); /* Must stop the translation as machine state (may have) changed */ /* Note that mtmsr is not always defined as context-synchronizing */ diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c index 86fc8f2..66d9a73 100644 --- a/target/ppc/translate_init.inc.c +++ b/target/ppc/translate_init.inc.c @@ -189,7 +189,6 @@ 
static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) } gen_helper_load_decr(cpu_gpr[gprn], cpu_env); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_stop_exception(ctx); } } @@ -201,7 +200,6 @@ static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) } gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); gen_stop_exception(ctx); } } diff --git a/target/riscv/insn_trans/trans_rvi.inc.c b/target/riscv/insn_trans/trans_rvi.inc.c index ea64731..1af795e 100644 --- a/target/riscv/insn_trans/trans_rvi.inc.c +++ b/target/riscv/insn_trans/trans_rvi.inc.c @@ -511,7 +511,6 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) } while (0) #define RISCV_OP_CSR_POST do {\ - gen_io_end(); \ gen_set_gpr(a->rd, dest); \ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \ exit_tb(ctx); \ diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index aee733e..ecaa7a1 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -23,8 +23,6 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" -#define ALIGNED_ONLY - /* CPU Subtypes */ #define SH_CPU_SH7750 (1 << 0) #define SH_CPU_SH7750S (1 << 1) diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 0d5b01e..694d713 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -5,8 +5,6 @@ #include "cpu-qom.h" #include "exec/cpu-defs.h" -#define ALIGNED_ONLY - #if !defined(TARGET_SPARC64) #define TARGET_DPREGS 16 #else diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 091bab5..02c1612 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -4412,10 +4412,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_helper_tick_set_limit(r_tickptr, cpu_tick_cmpr); tcg_temp_free_ptr(r_tickptr); - if (tb_cflags(dc->base.tb) & - CF_USE_ICOUNT) { - gen_io_end(); - } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4440,10 +4436,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_helper_tick_set_count(r_tickptr, cpu_tmp0); tcg_temp_free_ptr(r_tickptr); - if (tb_cflags(dc->base.tb) & - CF_USE_ICOUNT) { - gen_io_end(); - } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4468,10 +4460,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_helper_tick_set_limit(r_tickptr, cpu_stick_cmpr); tcg_temp_free_ptr(r_tickptr); - if (tb_cflags(dc->base.tb) & - CF_USE_ICOUNT) { - gen_io_end(); - } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4588,10 +4576,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_helper_tick_set_count(r_tickptr, cpu_tmp0); tcg_temp_free_ptr(r_tickptr); - if (tb_cflags(dc->base.tb) & - CF_USE_ICOUNT) { - gen_io_end(); - } /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c index d27451e..0e01f35 100644 --- a/target/unicore32/translate.c +++ b/target/unicore32/translate.c @@ -1931,7 +1931,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) code. 
*/ cpu_abort(cs, "IO on conditional branch instruction"); } - gen_io_end(); } /* At this stage dc->condjmp will only be set when the skipped diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 2c27713..0459243 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -32,8 +32,6 @@ #include "exec/cpu-defs.h" #include "xtensa-isa.h" -#define ALIGNED_ONLY - /* Xtensa processors have a weak memory model */ #define TCG_GUEST_DEFAULT_MO (0) diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index fa12a57..d20e60c 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -539,9 +539,6 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4) gen_io_start(); } gen_helper_waiti(cpu_env, pc, intlevel); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } tcg_temp_free(pc); tcg_temp_free(intlevel); } @@ -2215,9 +2212,6 @@ static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[], } gen_helper_update_ccount(cpu_env); tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } @@ -2607,9 +2601,6 @@ static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in); gen_helper_update_ccompare(cpu_env, tmp); tcg_temp_free(tmp); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } @@ -2621,9 +2612,6 @@ static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[], gen_io_start(); } gen_helper_wsr_ccount(cpu_env, arg[0].in); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } @@ -2830,9 +2818,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(arg[0].out, tmp); tcg_temp_free(tmp); - if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { - gen_io_end(); - } #endif } |
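The ops_sse.h hunks above wrap the softfloat conversions so that any float-to-integer conversion raising the 'invalid' flag returns the x86 integer-indefinite value (INT32_MIN or INT64_MIN). The hardware behaviour being modelled can be seen with a short host-side test; this is only an illustration of the target semantics using the standard SSE intrinsics on an x86 host, not QEMU code:

    /*
     * CVTTSS2SI returns the integer indefinite value 0x80000000
     * (INT32_MIN) whenever the conversion is invalid, e.g. for NaN
     * or an out-of-range input -- the same result the new
     * x86_float32_to_int32*() wrappers give QEMU's helpers.
     */
    #include <limits.h>
    #include <math.h>
    #include <stdio.h>
    #include <xmmintrin.h>

    int main(void)
    {
        int from_nan = _mm_cvttss_si32(_mm_set_ss(NAN));   /* invalid: NaN     */
        int from_big = _mm_cvttss_si32(_mm_set_ss(1e30f)); /* invalid: too big */

        printf("NaN   -> %d (INT32_MIN is %d)\n", from_nan, INT_MIN);
        printf("1e30f -> %d\n", from_big);
        return 0;
    }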