Diffstat (limited to 'target')
 target/avr/disas.c                            |  21
 target/hppa/cpu.h                             |   2
 target/loongarch/cpu.h                        |   1
 target/loongarch/internals.h                  |   2
 target/loongarch/kvm/kvm.c                    |  15
 target/loongarch/tcg/csr_helper.c             |   2
 target/loongarch/tcg/tlb_helper.c             |  15
 target/mips/cpu-param.h                       |   5
 target/mips/tcg/system/cp0_helper.c           |  32
 target/mips/tcg/system/tlb_helper.c           |   4
 target/mips/tcg/tcg-internal.h                |   2
 target/ppc/cpu.h                              |  11
 target/ppc/cpu_init.c                         |   8
 target/ppc/excp_helper.c                      |   4
 target/ppc/translate/vmx-impl.c.inc           |   2
 target/ppc/translate/vsx-impl.c.inc           |  20
 target/riscv/cpu.h                            |  12
 target/riscv/csr.c                            |   7
 target/riscv/insn_trans/trans_rvi.c.inc       |   8
 target/riscv/insn_trans/trans_rvzicfiss.c.inc |  17
 target/riscv/kvm/kvm-cpu.c                    |   6
 target/riscv/op_helper.c                      |   8
 target/riscv/translate.c                      |   4
 target/riscv/vcrypto_helper.c                 |  32
 target/riscv/vector_helper.c                  | 186
 target/riscv/vector_internals.c               |   4
 target/riscv/vector_internals.h               |  12
 target/s390x/cpu.c                            |   2
 target/sparc/cpu.h                            |   2
 target/sparc/ldst_helper.c                    |   6
 target/sparc/mmu_helper.c                     |   2
 31 files changed, 261 insertions(+), 193 deletions(-)
diff --git a/target/avr/disas.c b/target/avr/disas.c
index b7689e8d..d341030 100644
--- a/target/avr/disas.c
+++ b/target/avr/disas.c
@@ -68,28 +68,35 @@ static bool decode_insn(DisasContext *ctx, uint16_t insn);
int avr_print_insn(bfd_vma addr, disassemble_info *info)
{
- DisasContext ctx;
+ DisasContext ctx = { info };
DisasContext *pctx = &ctx;
bfd_byte buffer[4];
uint16_t insn;
int status;
- ctx.info = info;
-
- status = info->read_memory_func(addr, buffer, 4, info);
+ status = info->read_memory_func(addr, buffer, 2, info);
if (status != 0) {
info->memory_error_func(status, addr, info);
return -1;
}
insn = bfd_getl16(buffer);
- ctx.next_word = bfd_getl16(buffer + 2);
- ctx.next_word_used = false;
+
+ status = info->read_memory_func(addr + 2, buffer + 2, 2, info);
+ if (status == 0) {
+ ctx.next_word = bfd_getl16(buffer + 2);
+ }
if (!decode_insn(&ctx, insn)) {
output(".db", "0x%02x, 0x%02x", buffer[0], buffer[1]);
}
- return ctx.next_word_used ? 4 : 2;
+ if (!ctx.next_word_used) {
+ return 2;
+ } else if (status == 0) {
+ return 4;
+ }
+ info->memory_error_func(status, addr + 2, info);
+ return -1;
}
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 7be4a1d..8b36642 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -391,6 +391,4 @@ void hppa_cpu_alarm_timer(void *);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
-#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
-
#endif /* HPPA_CPU_H */
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index eae874c..254e4fb 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -426,6 +426,7 @@ struct ArchCPU {
const char *dtb_compatible;
/* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */
uint64_t kvm_state_counter;
+ VMChangeStateEntry *vmsentry;
};
/**
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index 1cd959a..9fdc305 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -43,7 +43,7 @@ enum {
TLBRET_PE = 7,
};
-bool check_ps(CPULoongArchState *ent, int ps);
+bool check_ps(CPULoongArchState *ent, uint8_t ps);
extern const VMStateDescription vmstate_loongarch_cpu;
diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
index 28735c8..f0e3cfe 100644
--- a/target/loongarch/kvm/kvm.c
+++ b/target/loongarch/kvm/kvm.c
@@ -1080,9 +1080,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
uint64_t val;
int ret;
Error *local_err = NULL;
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
- ret = 0;
- qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);
+ cpu->vmsentry = qemu_add_vm_change_state_handler(
+ kvm_loongarch_vm_stage_change, cs);
if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
brk_insn = val;
@@ -1091,29 +1092,34 @@ int kvm_arch_init_vcpu(CPUState *cs)
ret = kvm_cpu_check_lsx(cs, &local_err);
if (ret < 0) {
error_report_err(local_err);
+ return ret;
}
ret = kvm_cpu_check_lasx(cs, &local_err);
if (ret < 0) {
error_report_err(local_err);
+ return ret;
}
ret = kvm_cpu_check_lbt(cs, &local_err);
if (ret < 0) {
error_report_err(local_err);
+ return ret;
}
ret = kvm_cpu_check_pmu(cs, &local_err);
if (ret < 0) {
error_report_err(local_err);
+ return ret;
}
ret = kvm_cpu_check_pv_features(cs, &local_err);
if (ret < 0) {
error_report_err(local_err);
+ return ret;
}
- return ret;
+ return 0;
}
static bool loongarch_get_lbt(Object *obj, Error **errp)
@@ -1193,6 +1199,9 @@ void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu)
int kvm_arch_destroy_vcpu(CPUState *cs)
{
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+
+ qemu_del_vm_change_state_handler(cpu->vmsentry);
return 0;
}
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
index 379c71e..6a7a65c 100644
--- a/target/loongarch/tcg/csr_helper.c
+++ b/target/loongarch/tcg/csr_helper.c
@@ -115,7 +115,7 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val)
target_ulong helper_csrwr_pwcl(CPULoongArchState *env, target_ulong val)
{
- int shift, ptbase;
+ uint8_t shift, ptbase;
int64_t old_v = env->CSR_PWCL;
/*
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 646dbf5..70d1b5c 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -19,12 +19,12 @@
#include "exec/log.h"
#include "cpu-csr.h"
-bool check_ps(CPULoongArchState *env, int tlb_ps)
+bool check_ps(CPULoongArchState *env, uint8_t tlb_ps)
{
- if (tlb_ps > 64) {
- return false;
- }
- return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
+ if (tlb_ps >= 64) {
+ return false;
+ }
+ return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2);
}
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
@@ -543,7 +543,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
target_ulong level, uint32_t mem_idx)
{
CPUState *cs = env_cpu(env);
- target_ulong badvaddr, index, phys, ret;
+ target_ulong badvaddr, index, phys;
uint64_t dir_base, dir_width;
if (unlikely((level == 0) || (level > 4))) {
@@ -571,8 +571,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
get_dir_base_width(env, &dir_base, &dir_width, level);
index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
phys = base | index << 3;
- ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
- return ret;
+ return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
}
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h
index 11b3ac0..8fcb1b4 100644
--- a/target/mips/cpu-param.h
+++ b/target/mips/cpu-param.h
@@ -18,12 +18,7 @@
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
#endif
-#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_BITS 12
-#else
-#define TARGET_PAGE_BITS_VARY
-#define TARGET_PAGE_BITS_MIN 12
-#endif
#define TCG_GUEST_DEFAULT_MO (0)
diff --git a/target/mips/tcg/system/cp0_helper.c b/target/mips/tcg/system/cp0_helper.c
index 01a07a1..78e422b 100644
--- a/target/mips/tcg/system/cp0_helper.c
+++ b/target/mips/tcg/system/cp0_helper.c
@@ -864,36 +864,24 @@ void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
}
}
-void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
+uint32_t compute_pagemask(uint32_t val)
{
- uint32_t mask;
- int maskbits;
-
/* Don't care MASKX as we don't support 1KB page */
- mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
- maskbits = cto32(mask);
+ uint32_t mask = extract32(val, CP0PM_MASK, 16);
+ int maskbits = cto32(mask);
- /* Ensure no more set bit after first zero */
- if ((mask >> maskbits) != 0) {
- goto invalid;
- }
- /* We don't support VTLB entry smaller than target page */
- if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) {
- goto invalid;
+ /* Ensure no more set bit after first zero, and maskbits even. */
+ if ((mask >> maskbits) == 0 && maskbits % 2 == 0) {
+ return mask << CP0PM_MASK;
+ } else {
+ /* When invalid, set to default target page size. */
+ return 0;
}
- env->CP0_PageMask = mask << CP0PM_MASK;
-
- return;
-
-invalid:
- /* When invalid, set to default target page size. */
- mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN);
- env->CP0_PageMask = mask << CP0PM_MASK;
}
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
- update_pagemask(env, arg1, &env->CP0_PageMask);
+ env->CP0_PageMask = compute_pagemask(arg1);
}
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c
index ca4d6b2..df80301 100644
--- a/target/mips/tcg/system/tlb_helper.c
+++ b/target/mips/tcg/system/tlb_helper.c
@@ -875,8 +875,8 @@ refill:
break;
}
}
- pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
- update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
+ pw_pagemask = m >> TARGET_PAGE_BITS;
+ pw_pagemask = compute_pagemask(pw_pagemask << CP0PM_MASK);
pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
{
target_ulong tmp_entryhi = env->CP0_EntryHi;
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index 74fc130..950e6af 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -47,7 +47,7 @@ bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mmu_init(CPUMIPSState *env, const mips_def_t *def);
-void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
+uint32_t compute_pagemask(uint32_t val);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
uint32_t cpu_mips_get_random(CPUMIPSState *env);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index efab54a..3ee8351 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1356,6 +1356,17 @@ struct CPUArchState {
* special way (such as routing some resume causes to 0x100, i.e. sreset).
*/
bool resume_as_sreset;
+
+ /*
+ * On powernv, quiesced means the CPU has been stopped using PC direct
+ * control xscom registers.
+ *
+ * On spapr, quiesced means it is in the "RTAS stopped" state.
+ *
+ * The core halted/stopped variables aren't sufficient for this, because
+ * they can be changed with various side-band operations like qmp cont,
+ * powersave interrupts, etc.
+ */
bool quiesced;
#endif
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 8b590e7..7decc09 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -2744,14 +2744,6 @@ static void init_proc_e200(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000); /* TOFIX */
- spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
init_tlbs_emb(env);
init_excp_e200(env, 0xFFFF0000UL);
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 44e19aa..c941c89 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -1951,6 +1951,10 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
target_ulong lpcr = env->spr[SPR_LPCR];
bool async_deliver;
+ if (unlikely(env->quiesced)) {
+ return 0;
+ }
+
#ifdef TARGET_PPC64
switch (env->excp_model) {
case POWERPC_EXCP_POWER7:
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 70d0ad2..92d6e8c 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -994,8 +994,8 @@ static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
{
TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
- REQUIRE_VECTOR(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
ah = tcg_temp_new_i64();
al = tcg_temp_new_i64();
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index a869f30..00ad57c 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -61,8 +61,8 @@ static bool trans_LXVD2X(DisasContext *ctx, arg_LXVD2X *a)
TCGv EA;
TCGv_i64 t0;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
@@ -80,8 +80,8 @@ static bool trans_LXVW4X(DisasContext *ctx, arg_LXVW4X *a)
TCGv EA;
TCGv_i64 xth, xtl;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
xth = tcg_temp_new_i64();
xtl = tcg_temp_new_i64();
@@ -113,12 +113,12 @@ static bool trans_LXVWSX(DisasContext *ctx, arg_LXVWSX *a)
TCGv EA;
TCGv_i32 data;
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
if (a->rt < 32) {
REQUIRE_VSX(ctx);
} else {
REQUIRE_VECTOR(ctx);
}
- REQUIRE_INSNS_FLAGS2(ctx, ISA300);
gen_set_access_type(ctx, ACCESS_INT);
EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
@@ -133,8 +133,8 @@ static bool trans_LXVDSX(DisasContext *ctx, arg_LXVDSX *a)
TCGv EA;
TCGv_i64 data;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
gen_set_access_type(ctx, ACCESS_INT);
EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);
@@ -185,8 +185,8 @@ static bool trans_LXVH8X(DisasContext *ctx, arg_LXVH8X *a)
TCGv EA;
TCGv_i64 xth, xtl;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
xth = tcg_temp_new_i64();
xtl = tcg_temp_new_i64();
@@ -208,8 +208,8 @@ static bool trans_LXVB16X(DisasContext *ctx, arg_LXVB16X *a)
TCGv EA;
TCGv_i128 data;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
data = tcg_temp_new_i128();
gen_set_access_type(ctx, ACCESS_INT);
@@ -312,8 +312,8 @@ static bool trans_STXVD2X(DisasContext *ctx, arg_STXVD2X *a)
TCGv EA;
TCGv_i64 t0;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
@@ -331,8 +331,8 @@ static bool trans_STXVW4X(DisasContext *ctx, arg_STXVW4X *a)
TCGv EA;
TCGv_i64 xsh, xsl;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
@@ -364,8 +364,8 @@ static bool trans_STXVH8X(DisasContext *ctx, arg_STXVH8X *a)
TCGv EA;
TCGv_i64 xsh, xsl;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
@@ -394,8 +394,8 @@ static bool trans_STXVB16X(DisasContext *ctx, arg_STXVB16X *a)
TCGv EA;
TCGv_i128 data;
- REQUIRE_VSX(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
data = tcg_temp_new_i128();
gen_set_access_type(ctx, ACCESS_INT);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7de19b4..51e49e0 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -765,6 +765,18 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
}
#endif
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
+ target_long priv_ver,
+ uint32_t misa_ext)
+{
+ /* In priv spec version 1.12 or newer, C always implies Zca */
+ if (priv_ver >= PRIV_VERSION_1_12_0) {
+ return cfg->ext_zca;
+ } else {
+ return misa_ext & RVC;
+ }
+}
+
/*
* Encode LMUL to lmul as follows:
* LMUL vlmul lmul
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 49566d3..7948188 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -192,6 +192,11 @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
+ /* If ext implemented, M-mode always have access to SSP CSR */
+ if (env->priv == PRV_M) {
+ return RISCV_EXCP_NONE;
+ }
+
/* if bcfi not active for current env, access to csr is illegal */
if (!cpu_get_bcfien(env)) {
#if !defined(CONFIG_USER_ONLY)
@@ -4297,7 +4302,7 @@ static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
}
/* Update sctrstatus.WRPTR with a legal value */
- depth = 16 << depth;
+ depth = 16ULL << depth;
env->sctrstatus =
env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
}
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index b55f56a..b9c7160 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -151,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
tcg_gen_ext32s_tl(target_pc, target_pc);
}
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
TCGv t0 = tcg_temp_new();
misaligned = gen_new_label();
@@ -300,7 +302,9 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
gen_set_label(l); /* branch taken */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext) &&
(a->imm & 0x3)) {
/* misaligned */
TCGv target_pc = tcg_temp_new();
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
index e3ebc49..b0096ad 100644
--- a/target/riscv/insn_trans/trans_rvzicfiss.c.inc
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
@@ -15,6 +15,13 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
+#define REQUIRE_ZICFISS(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zicfiss) { \
+ return false; \
+ } \
+} while (0)
+
static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
{
if (!ctx->bcfi_enabled) {
@@ -77,6 +84,11 @@ static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
if (!ctx->bcfi_enabled) {
return false;
}
@@ -97,6 +109,11 @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_A_OR_ZAAMO(ctx);
+ REQUIRE_ZICFISS(ctx);
+ if (ctx->priv == PRV_M) {
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
+ }
+
if (!ctx->bcfi_enabled) {
return false;
}
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 4ffeeaa..0f4997a 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -624,8 +624,6 @@ static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
env->stval = 0;
env->mip = 0;
env->satp = 0;
- env->scounteren = 0;
- env->senvcfg = 0;
}
static int kvm_riscv_get_regs_csr(CPUState *cs)
@@ -641,8 +639,6 @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
- KVM_RISCV_GET_CSR(cs, env, scounteren, env->scounteren);
- KVM_RISCV_GET_CSR(cs, env, senvcfg, env->senvcfg);
return 0;
}
@@ -660,8 +656,6 @@ static int kvm_riscv_put_regs_csr(CPUState *cs)
KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
- KVM_RISCV_SET_CSR(cs, env, scounteren, env->scounteren);
- KVM_RISCV_SET_CSR(cs, env, senvcfg, env->senvcfg);
return 0;
}
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 0d4220b..72dc48e 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -279,7 +279,9 @@ target_ulong helper_sret(CPURISCVState *env)
}
target_ulong retpc = env->sepc;
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
@@ -357,7 +359,9 @@ static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
+ env->priv_ver,
+ env->misa_ext) && (retpc & 0x3)) {
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index eaa5d86..d6651f2 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -606,7 +606,9 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
TCGv succ_pc = dest_gpr(ctx, rd);
/* check misaligned: */
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
+ ctx->priv_ver,
+ ctx->misa_ext)) {
if ((imm & 0x3) != 0) {
TCGv target_pc = tcg_temp_new();
gen_pc_plus_diff(target_pc, ctx, imm);
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index f7423df..1526de9 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -222,7 +222,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -248,7 +248,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
uint32_t vta = vext_vta(desc); \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
AESState round_key; \
@@ -309,7 +309,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 10 || uimm == 0) {
@@ -357,7 +357,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
uimm &= 0b1111;
if (uimm > 14 || uimm < 2) {
@@ -465,7 +465,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
if (sew == MO_32) {
@@ -582,7 +582,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -602,7 +602,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -622,7 +622,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
@@ -642,7 +642,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
uint32_t total_elems;
uint32_t vta = vext_vta(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
@@ -676,7 +676,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t *vs1 = vs1_vptr;
uint32_t *vs2 = vs2_vptr;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
uint32_t w[24];
@@ -777,7 +777,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
uint32_t *vs2 = vs2_vptr;
uint32_t v1[8], v2[8], v3[8];
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
for (int k = 0; k < 8; k++) {
@@ -802,7 +802,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
@@ -841,7 +841,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
uint32_t vta = vext_vta(desc);
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
@@ -879,7 +879,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -937,7 +937,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
@@ -973,7 +973,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
uint32_t esz = sizeof(uint32_t);
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (uint32_t i = group_start; i < group_end; ++i) {
uint32_t vstart = i * egs;
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 7773df6..67b3baf 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -260,7 +260,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
k = 0;
@@ -383,10 +383,7 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
uint32_t msize = nf * esz;
int mmu_index = riscv_env_mmu_index(env, false);
- if (env->vstart >= evl) {
- env->vstart = 0;
- return;
- }
+ VSTART_CHECK_EARLY_EXIT(env, evl);
#if defined(CONFIG_USER_ONLY)
/*
@@ -544,7 +541,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t esz = 1 << log2_esz;
uint32_t vma = vext_vma(desc);
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
/* load bytes from guest memory */
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
@@ -633,47 +630,69 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
uint32_t esz = 1 << log2_esz;
uint32_t msize = nf * esz;
uint32_t vma = vext_vma(desc);
- target_ulong addr, offset, remain, page_split, elems;
+ target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
+ int flags;
+ void *host;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
- /* probe every access */
- for (i = env->vstart; i < env->vl; i++) {
- if (!vm && !vext_elem_mask(v0, i)) {
- continue;
- }
- addr = adjust_addr(env, base + i * (nf << log2_esz));
- if (i == 0) {
- /* Allow fault on first element. */
- probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
- } else {
- remain = nf << log2_esz;
- while (remain > 0) {
- void *host;
- int flags;
-
- offset = -(addr | TARGET_PAGE_MASK);
-
- /* Probe nonfault on subsequent elements. */
- flags = probe_access_flags(env, addr, offset, MMU_DATA_LOAD,
- mmu_index, true, &host, 0);
-
- /*
- * Stop if invalid (unmapped) or mmio (transaction may fail).
- * Do not stop if watchpoint, as the spec says that
- * first-fault should continue to access the same
- * elements regardless of any watchpoint.
- */
- if (flags & ~TLB_WATCHPOINT) {
- vl = i;
- goto ProbeSuccess;
- }
- if (remain <= offset) {
- break;
+ addr = base + ((env->vstart * nf) << log2_esz);
+ page_split = -(addr | TARGET_PAGE_MASK);
+ /* Get number of elements */
+ elems = page_split / msize;
+ if (unlikely(env->vstart + elems >= env->vl)) {
+ elems = env->vl - env->vstart;
+ }
+
+ /* Check page permission/pmp/watchpoint/etc. */
+ flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize,
+ MMU_DATA_LOAD, mmu_index, true, &host, ra);
+
+ /* If we are crossing a page check also the second page. */
+ if (env->vl > elems) {
+ addr_probe = addr + (elems << log2_esz);
+ flags |= probe_access_flags(env, adjust_addr(env, addr_probe),
+ elems * msize, MMU_DATA_LOAD, mmu_index,
+ true, &host, ra);
+ }
+
+ if (flags & ~TLB_WATCHPOINT) {
+ /* probe every access */
+ for (i = env->vstart; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, i)) {
+ continue;
+ }
+ addr_i = adjust_addr(env, base + i * (nf << log2_esz));
+ if (i == 0) {
+ /* Allow fault on first element. */
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD);
+ } else {
+ remain = nf << log2_esz;
+ while (remain > 0) {
+ offset = -(addr_i | TARGET_PAGE_MASK);
+
+ /* Probe nonfault on subsequent elements. */
+ flags = probe_access_flags(env, addr_i, offset,
+ MMU_DATA_LOAD, mmu_index, true,
+ &host, 0);
+
+ /*
+ * Stop if invalid (unmapped) or mmio (transaction may
+ * fail). Do not stop if watchpoint, as the spec says that
+ * first-fault should continue to access the same
+ * elements regardless of any watchpoint.
+ */
+ if (flags & ~TLB_WATCHPOINT) {
+ vl = i;
+ goto ProbeSuccess;
+ }
+ if (remain <= offset) {
+ break;
+ }
+ remain -= offset;
+ addr_i = adjust_addr(env, addr_i + offset);
}
- remain -= offset;
- addr = adjust_addr(env, addr + offset);
}
}
}
@@ -685,15 +704,6 @@ ProbeSuccess:
if (env->vstart < env->vl) {
if (vm) {
- /* Calculate the page range of first page */
- addr = base + ((env->vstart * nf) << log2_esz);
- page_split = -(addr | TARGET_PAGE_MASK);
- /* Get number of elements */
- elems = page_split / msize;
- if (unlikely(env->vstart + elems >= env->vl)) {
- elems = env->vl - env->vstart;
- }
-
/* Load/store elements in the first page */
if (likely(elems)) {
vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
@@ -1103,7 +1113,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1137,7 +1147,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1174,7 +1184,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1214,7 +1224,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1312,7 +1322,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1361,7 +1371,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1425,7 +1435,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -1492,7 +1502,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -2041,7 +2051,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -2067,7 +2077,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
@@ -2092,7 +2102,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
@@ -2118,7 +2128,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -2165,8 +2175,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -2190,6 +2198,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vv_rm_1(vd, v0, vs1, vs2,
@@ -2292,8 +2302,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vl, uint32_t vm, int vxrm,
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
{
- VSTART_CHECK_EARLY_EXIT(env);
-
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -2317,6 +2325,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vta = vext_vta(desc);
uint32_t vma = vext_vma(desc);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
switch (env->vxrm) {
case 0: /* rnu */
vext_vx_rm_1(vd, v0, s1, vs2,
@@ -3091,7 +3101,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3136,7 +3146,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -3724,7 +3734,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
if (vl == 0) { \
return; \
@@ -4247,7 +4257,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
@@ -4289,7 +4299,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4484,7 +4494,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4652,6 +4662,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4740,6 +4752,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
TD s1 = *((TD *)vs1 + HD(0)); \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4814,7 +4828,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
uint32_t i; \
int a, b; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
a = vext_elem_mask(vs1, i); \
@@ -4904,6 +4918,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
int i;
bool first_mask_bit = false;
+ VSTART_CHECK_EARLY_EXIT(env, vl);
+
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
@@ -4976,6 +4992,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
uint32_t sum = 0; \
int i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
@@ -5009,7 +5027,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
uint32_t vma = vext_vma(desc); \
int i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5046,7 +5064,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong offset = s1, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MAX(env->vstart, offset); \
for (i = i_min; i < vl; i++) { \
@@ -5081,7 +5099,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vma = vext_vma(desc); \
target_ulong i_max, i_min, i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
i_max = MAX(i_min, env->vstart); \
@@ -5125,7 +5143,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5176,7 +5194,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5253,7 +5271,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint64_t index; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5298,7 +5316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint64_t index = s1; \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
@@ -5334,6 +5352,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
+ \
for (i = env->vstart; i < vl; i++) { \
if (!vext_elem_mask(vs1, i)) { \
continue; \
@@ -5394,7 +5414,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 05b2d01..b490b1d 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
@@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
uint32_t vma = vext_vma(desc);
uint32_t i;
- VSTART_CHECK_EARLY_EXIT(env);
+ VSTART_CHECK_EARLY_EXIT(env, vl);
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index a11cc83..8eee7e5 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -25,11 +25,11 @@
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
- if (env->vstart >= env->vl) { \
- env->vstart = 0; \
- return; \
- } \
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
+ if (env->vstart >= vl) { \
+ env->vstart = 0; \
+ return; \
+ } \
} while (0)
static inline uint32_t vext_nf(uint32_t desc)
@@ -159,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
uint32_t vma = vext_vma(desc); \
uint32_t i; \
\
- VSTART_CHECK_EARLY_EXIT(env); \
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
\
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index d731426..1f75629 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -377,7 +377,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
resettable_class_set_parent_phases(rc, NULL, s390_cpu_reset_hold, NULL,
&scc->parent_phases);
- cc->class_by_name = s390_cpu_class_by_name,
+ cc->class_by_name = s390_cpu_class_by_name;
cc->mmu_index = s390x_cpu_mmu_index;
cc->dump_state = s390_cpu_dump_state;
cc->query_cpu_fast = s390_query_cpu_fast;
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 462bcb6..68f8c21 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -604,7 +604,7 @@ void dump_mmu(CPUSPARCState *env);
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
- uint8_t *buf, int len, bool is_write);
+ uint8_t *buf, size_t len, bool is_write);
#endif
/* translate.c */
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index b559afc..45882e2 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -600,6 +600,9 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case 0x0C: /* Leon3 Date Cache config */
if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
ret = leon3_cache_control_ld(env, addr, size);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
+ " address, size: %d\n", addr, size);
}
break;
case 0x01c00a00: /* MXCC control register */
@@ -816,6 +819,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
case 0x0C: /* Leon3 Date Cache config */
if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
leon3_cache_control_st(env, addr, val, size);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented"
+ " address, size: %d\n", addr, size);
}
break;
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 7548d01..3821cd9 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -389,7 +389,7 @@ void dump_mmu(CPUSPARCState *env)
* that the sparc ABI is followed.
*/
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
- uint8_t *buf, int len, bool is_write)
+ uint8_t *buf, size_t len, bool is_write)
{
CPUSPARCState *env = cpu_env(cs);
target_ulong addr = address;