Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.c             6
-rw-r--r--  target/arm/helper-a64.c     20
-rw-r--r--  target/arm/helper.c        178
-rw-r--r--  target/arm/translate-a64.c   2
-rw-r--r--  target/arm/translate.c      39
-rw-r--r--  target/i386/cpu.c            2
-rw-r--r--  target/ppc/compat.c          2
-rw-r--r--  target/s390x/translate.c     9
8 files changed, 224 insertions, 34 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 47c8b2a..7f7a3d1 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -489,13 +489,19 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
info->print_insn = print_insn_arm_a64;
#endif
info->cap_arch = CS_ARCH_ARM64;
+ info->cap_insn_unit = 4;
+ info->cap_insn_split = 4;
} else {
int cap_mode;
if (env->thumb) {
info->print_insn = print_insn_thumb1;
+ info->cap_insn_unit = 2;
+ info->cap_insn_split = 4;
cap_mode = CS_MODE_THUMB;
} else {
info->print_insn = print_insn_arm;
+ info->cap_insn_unit = 4;
+ info->cap_insn_split = 4;
cap_mode = CS_MODE_ARM;
}
if (arm_feature(env, ARM_FEATURE_V8)) {
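A note on the two new fields (a hedged reading, since this hunk sets them without defining them): they appear to control how the disassembler groups the raw instruction bytes it dumps alongside the mnemonics.

    /* Interpretation assumed here, not spelled out in the patch:
     * cap_insn_unit:  granule the encoding bytes are printed in
     *                 (2-byte halfwords for Thumb, 4-byte words for
     *                 ARM and A64);
     * cap_insn_split: byte count after which a long encoding is split
     *                 onto a continuation line (4 covers a 32-bit
     *                 Thumb-2 encoding made of two halfword units). */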
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index d0e435c..b84ebca 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -432,9 +432,8 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
/* Returns 0 on success; 1 otherwise. */
static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi,
- bool parallel)
+ bool parallel, uintptr_t ra)
{
- uintptr_t ra = GETPC();
Int128 oldv, cmpv, newv;
bool success;
@@ -456,6 +455,8 @@ static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
+
+ helper_retaddr = ra;
o0 = ldq_le_p(haddr + 0);
o1 = ldq_le_p(haddr + 1);
oldv = int128_make128(o0, o1);
@@ -465,6 +466,7 @@ static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
stq_le_p(haddr + 0, int128_getlo(newv));
stq_le_p(haddr + 1, int128_gethi(newv));
}
+ helper_retaddr = 0;
#else
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
@@ -488,20 +490,19 @@ static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi)
{
- return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false);
+ return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false, GETPC());
}
uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi)
{
- return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true);
+ return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true, GETPC());
}
static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi,
- bool parallel)
+ bool parallel, uintptr_t ra)
{
- uintptr_t ra = GETPC();
Int128 oldv, cmpv, newv;
bool success;
@@ -523,6 +524,8 @@ static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
+
+ helper_retaddr = ra;
o1 = ldq_be_p(haddr + 0);
o0 = ldq_be_p(haddr + 1);
oldv = int128_make128(o0, o1);
@@ -532,6 +535,7 @@ static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
stq_be_p(haddr + 0, int128_gethi(newv));
stq_be_p(haddr + 1, int128_getlo(newv));
}
+ helper_retaddr = 0;
#else
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
@@ -555,11 +559,11 @@ static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi)
{
- return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false);
+ return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false, GETPC());
}
uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
uint64_t new_lo, uint64_t new_hi)
{
- return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true);
+ return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true, GETPC());
}
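The change in this file is the classic GETPC() hoisting pattern: the return address into TCG-generated code is essentially __builtin_return_address(0), so it is only meaningful when captured in the helper that the generated code calls directly. Once two helpers share a static worker, the worker has to take the address as a parameter. Schematically (placeholder names, not QEMU functions):

    static uint64_t do_op(CPUARMState *env, uint64_t addr, uintptr_t ra)
    {
        /* memory accesses that may fault unwind against 'ra',
         * not against a GETPC() taken in this (inner) frame */
        return 0;
    }

    uint64_t HELPER(op)(CPUARMState *env, uint64_t addr)
    {
        return do_op(env, addr, GETPC()); /* capture in the outermost frame */
    }

The helper_retaddr assignments do the same job on the CONFIG_USER_ONLY path, where g2h() touches host memory directly and the fault handler needs the helper's return address to attribute a SIGSEGV to the right guest instruction.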
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 96113fe..f61fb3e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -19,17 +19,23 @@
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
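For reference, the 2-bit shareability values stored in ARMCacheAttrs use the VMSAv8-64 SH encoding (stated here from the ARM ARM rather than from the patch itself):

    /* SH field of the VMSAv8-64 PTEs:
     *   0b00  Non-shareable
     *   0b01  (reserved)
     *   0b10  Outer Shareable
     *   0b11  Inner Shareable
     */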
@@ -2159,9 +2165,10 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
uint64_t par64;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
- ret = get_phys_addr(env, value, access_type, mmu_idx,
- &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
+ &prot, &page_size, &fsr, &fi, &cacheattrs);
if (extended_addresses_enabled(env)) {
/* fsr is a DFSR/IFSR value for the long descriptor
* translation table format, but with WnR always clear.
@@ -2173,7 +2180,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
if (!attrs.secure) {
par64 |= (1 << 9); /* NS */
}
- /* We don't set the ATTR or SH fields in the PAR. */
+ par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
+ par64 |= cacheattrs.shareability << 7; /* SH */
} else {
par64 |= 1; /* F */
par64 |= (fsr & 0x3f) << 1; /* FS */
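With the attributes plumbed through, a successful AT translation now fills the PAR fields that were previously left zero. A worked example (values illustrative):

    /* cacheattrs = { .attrs = 0xff, .shareability = 3 } on a
     * non-secure page: */
    par64 |= (uint64_t)0xff << 56; /* ATTR: Normal, WB, RW-allocate */
    par64 |= 3 << 7;               /* SH: Inner Shareable */
    par64 |= 1 << 9;               /* NS */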
@@ -6925,7 +6933,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
return false;
}
if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
- &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
+ &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
/* the MPU lookup failed */
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -8207,7 +8215,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
int ret;
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fsr, fi);
+ &txattrs, &s2prot, &s2size, fsr, fi, NULL);
if (ret) {
fi->s2addr = addr;
fi->stage2 = true;
@@ -8608,11 +8616,41 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
return true;
}
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ * .../S2ConvertAttrsHints()
+ */
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
+ uint8_t loattr = extract32(s2attrs, 0, 2);
+ uint8_t hihint = 0, lohint = 0;
+
+ if (hiattr != 0) { /* normal memory */
+ if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
+ hiattr = loattr = 1; /* non-cacheable */
+ } else {
+ if (hiattr != 1) { /* Write-through or write-back */
+ hihint = 3; /* RW allocate */
+ }
+ if (loattr != 1) { /* Write-through or write-back */
+ lohint = 3; /* RW allocate */
+ }
+ }
+ }
+
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
+
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
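Two worked examples for convert_stage2_attrs(), following the arithmetic above:

    /* s2attrs = 0xf (outer and inner write-back), HCR_EL2.CD clear:
     *   hiattr = loattr = 3; both != 1, so hihint = lohint = 3;
     *   result = (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xff
     *   (Normal, write-back, RW-allocate, in MAIR encoding)
     *
     * same s2attrs with HCR_EL2.CD set:
     *   hiattr = loattr = 1; result = (1 << 6) | (1 << 2) = 0x44
     *   (Normal, non-cacheable) */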
@@ -8929,6 +8967,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
*/
txattrs->secure = false;
}
+
+ if (cacheattrs != NULL) {
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ cacheattrs->attrs = convert_stage2_attrs(env,
+ extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ }
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+ }
+
*phys_ptr = descaddr;
*page_size_ptr = page_size;
return false;
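For the stage 1 case, AttrIndx from the PTE simply selects one byte of the relevant MAIR register. For example (register value illustrative):

    /* mair = 0x00000000004404ff:
     *   AttrIndx 0 -> extract64(mair,  0, 8) = 0xff  Normal WB RW-alloc
     *   AttrIndx 1 -> extract64(mair,  8, 8) = 0x04  Device-nGnRE
     *   AttrIndx 2 -> extract64(mair, 16, 8) = 0x44  Normal Non-cacheable */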
@@ -9490,6 +9543,93 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
return false;
}
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+ if (s1 == 4 || s2 == 4) {
+ /* non-cacheable has precedence */
+ return 4;
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+ /* stage 1 write-through takes precedence */
+ return s1;
+ } else if (extract32(s2, 2, 2) == 2) {
+ /* stage 2 write-through takes precedence, but the allocation hint
+ * is still taken from stage 1
+ */
+ return (2 << 2) | extract32(s1, 0, 2);
+ } else { /* write-back */
+ return s1;
+ }
+}
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
+ uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
+ ARMCacheAttrs ret;
+
+ /* Combine shareability attributes (table D4-43) */
+ if (s1.shareability == 2 || s2.shareability == 2) {
+ /* if either are outer-shareable, the result is outer-shareable */
+ ret.shareability = 2;
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
+ /* if either are inner-shareable, the result is inner-shareable */
+ ret.shareability = 3;
+ } else {
+ /* both non-shareable */
+ ret.shareability = 0;
+ }
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret.attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret.attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret.attrs = 8; /* nGRE */
+ } else {
+ ret.attrs = 0xc; /* GRE */
+ }
+
+ /* Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ */
+ ret.shareability = 2;
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+
+ if (ret.attrs == 0x44) {
+ /* Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ */
+ ret.shareability = 2;
+ }
+ }
+
+ return ret;
+}
+
+
/* get_phys_addr - get the physical address for this virtual address
*
* Find the physical address corresponding to the given virtual address,
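One worked combination through the two routines above:

    /* s1 = { .attrs = 0xff, .shareability = 0 }  Normal WB RW-alloc
     * s2 = { .attrs = 0x44, .shareability = 0 }  Normal Non-cacheable
     *
     * Each nibble takes the "s1 == 4 || s2 == 4" arm of
     * combine_cacheattr_nibble(), so ret.attrs = 0x44; that value then
     * triggers the ret.attrs == 0x44 special case in
     * combine_cacheattrs(), forcing ret.shareability = 2
     * (Outer Shareable). */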
@@ -9514,12 +9654,14 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
* @fsr: set to the DFSR/IFSR value on failure
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
/* Call ourselves recursively to do the stage 1 and then stage 2
@@ -9529,10 +9671,11 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
hwaddr ipa;
int s2_prot;
int ret;
+ ARMCacheAttrs cacheattrs2 = {};
ret = get_phys_addr(env, address, access_type,
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
- prot, page_size, fsr, fi);
+ prot, page_size, fsr, fi, cacheattrs);
/* If S1 fails or S2 is disabled, return early. */
if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
@@ -9543,10 +9686,17 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
phys_ptr, attrs, &s2_prot,
- page_size, fsr, fi);
+ page_size, fsr, fi,
+ cacheattrs != NULL ? &cacheattrs2 : NULL);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
*prot &= s2_prot;
+
+ /* Combine the S1 and S2 cache attributes, if needed */
+ if (!ret && cacheattrs != NULL) {
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ }
+
return ret;
} else {
/*
@@ -9617,7 +9767,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr, fi);
+ attrs, prot, page_size, fsr, fi, cacheattrs);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
attrs, prot, page_size, fsr, fi);
@@ -9645,7 +9795,7 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
ret = get_phys_addr(env, address, access_type,
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fsr, fi);
+ &attrs, &prot, &page_size, fsr, fi, NULL);
if (!ret) {
/* Map a single [sub]page. */
phys_addr &= TARGET_PAGE_MASK;
@@ -9674,7 +9824,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fsr, &fi);
+ attrs, &prot, &page_size, &fsr, &fi, NULL);
if (ret) {
return -1;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index caca05a..625ef2d 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2351,6 +2351,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
post_index = false;
writeback = true;
break;
+ default:
+ g_assert_not_reached();
}
if (rn == 31) {
diff --git a/target/arm/translate.c b/target/arm/translate.c
index df57dbb..4afb0c8 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -7903,9 +7903,27 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i64 t64 = tcg_temp_new_i64();
- gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+ /* For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. That means we don't want to do a
+ * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
+ * for an architecturally 64-bit access, but instead do a
+ * 64-bit access using MO_BE if appropriate and then split
+ * the two halves.
+ * This only makes a difference for BE32 user-mode, where
+ * frob64() must not flip the two halves of the 64-bit data
+ * but this code must treat BE32 user-mode like BE32 system.
+ */
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
+
+ tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
+ tcg_temp_free(taddr);
tcg_gen_mov_i64(cpu_exclusive_val, t64);
- tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ if (s->be_data == MO_BE) {
+ tcg_gen_extr_i64_i32(tmp2, tmp, t64);
+ } else {
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ }
tcg_temp_free_i64(t64);
store_reg(s, rt2, tmp2);
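The swap is easiest to read off the TCG helper's argument order: tcg_gen_extr_i64_i32(lo, hi, val) writes the low half of val into its first argument and the high half into its second.

    /* After a 64-bit MO_BE load, the word at the lower address (Rt)
     * lands in the *high* half of t64, hence:
     *   BE: tcg_gen_extr_i64_i32(tmp2, tmp, t64);   tmp = high = Rt
     *   LE: tcg_gen_extr_i64_i32(tmp, tmp2, t64);   tmp = low  = Rt */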
@@ -7954,15 +7972,26 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i64 n64 = tcg_temp_new_i64();
t2 = load_reg(s, rt2);
- tcg_gen_concat_i32_i64(n64, t1, t2);
+ /* For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. Since we're going to treat this as a
+ * single 64-bit BE store, we need to put the two halves in the
+ * opposite order for BE to LE, so that they end up in the right
+ * places.
+ * We don't want gen_aa32_frob64() because that does the wrong
+ * thing for BE32 usermode.
+ */
+ if (s->be_data == MO_BE) {
+ tcg_gen_concat_i32_i64(n64, t2, t1);
+ } else {
+ tcg_gen_concat_i32_i64(n64, t1, t2);
+ }
tcg_temp_free_i32(t2);
- gen_aa32_frob64(s, n64);
tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
get_mem_index(s), opc);
tcg_temp_free_i64(n64);
- gen_aa32_frob64(s, o64);
tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
tcg_gen_extrl_i64_i32(t0, o64);
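The store side mirrors the load side: tcg_gen_concat_i32_i64(dst, lo, hi) takes the low half as its second argument, so for a big-endian 64-bit store Rt has to be placed in the high half to end up at the lower address.

    /* BE: tcg_gen_concat_i32_i64(n64, t2, t1);   high = t1 = Rt
     * LE: tcg_gen_concat_i32_i64(n64, t1, t2);   low  = t1 = Rt */

Dropping the two gen_aa32_frob64() calls then keeps cpu_exclusive_val and the cmpxchg operand in memory byte order on both paths, matching what gen_load_exclusive now records.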
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ecebc5a..045d661 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -4109,6 +4109,8 @@ static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
: env->hflags & HF_CS32_MASK ? CS_MODE_32
: CS_MODE_16);
+ info->cap_insn_unit = 1;
+ info->cap_insn_split = 8;
}
static Property x86_cpu_properties[] = {
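Unlike the fixed-width ARM cases above, x86 is a variable-length byte stream, which is presumably why the unit here is a single byte with long encodings split after eight (an x86 instruction can be up to 15 bytes):

    info->cap_insn_unit = 1;  /* dump the encoding byte by byte */
    info->cap_insn_split = 8; /* continuation line beyond 8 bytes */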
diff --git a/target/ppc/compat.c b/target/ppc/compat.c
index f8729fe..ad8f93c 100644
--- a/target/ppc/compat.c
+++ b/target/ppc/compat.c
@@ -141,7 +141,7 @@ void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp)
cpu_synchronize_state(CPU(cpu));
if (kvm_enabled() && cpu->compat_pvr != compat_pvr) {
- int ret = kvmppc_set_compat(cpu, cpu->compat_pvr);
+ int ret = kvmppc_set_compat(cpu, compat_pvr);
if (ret < 0) {
error_setg_errno(errp, -ret,
"Unable to set CPU compatibility mode in KVM");
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index dee72a7..85d0a6c 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -3432,6 +3432,7 @@ static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
/* Adjust the arguments for the specific insn. */
switch (s->fields->op2) {
case 0x55: /* risbg */
+ case 0x59: /* risbgn */
i3 &= 63;
i4 &= 63;
pmask = ~0;
@@ -3447,7 +3448,7 @@ static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
pmask = 0x00000000ffffffffull;
break;
default:
- abort();
+ g_assert_not_reached();
}
/* MASK is the set of bits to be inserted from R2.
@@ -3464,11 +3465,7 @@ static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
insns, we need to keep the other half of the register. */
imask = ~mask | ~pmask;
if (do_zero) {
- if (s->fields->op2 == 0x55) {
- imask = 0;
- } else {
- imask = ~pmask;
- }
+ imask = ~pmask;
}
len = i4 - i3 + 1;
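The do_zero simplification is justified by the earlier switch: for risbg (0x55) it sets pmask = ~0, so the removed special case computed the same value anyway, and the new risbgn (0x59) case shares the whole path since risbgn differs from risbg only in not setting the condition code.

    /* op2 == 0x55 (risbg): pmask == ~0ull, so imask = ~pmask == 0,
     * which is exactly what the old "imask = 0" branch produced. */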