author     Richard Henderson <richard.henderson@linaro.org>  2022-05-05 13:52:22 -0500
committer  Richard Henderson <richard.henderson@linaro.org>  2022-05-05 13:52:22 -0500
commit     31abf61c4929a91275fe32f1fafe6e6b3e840b2a (patch)
tree       696cb00a00b9bbd23b650af483041cb0922fc996
parent     e91b8994115d2f093e7556c9af2d051a26a98cfb (diff)
parent     bf3dd1e6d0d7c5c4906f89776e15dddc22af784b (diff)
Merge tag 'pull-ppc-20220505' of https://gitlab.com/danielhb/qemu into staging
ppc patch queue for 2022-05-05:

The star of the show in this PR is the 'Remove hidden usages of *env'
work done by Víctor, which impacts a lot of target/ppc code and we want
to get it landed ASAP so future target/ppc contributions can be based
on it.

Other changes:

- XIVE fixes in guest interrupt handling
- BookE debug interrupt fix
- vhost-user TARGET_PPC64 macro fix
- valgrind fixes in kvmppc functions

# -----BEGIN PGP SIGNATURE-----
#
# iHUEABYKAB0WIQQX6/+ZI9AYAK8oOBk82cqW3gMxZAUCYnQbpgAKCRA82cqW3gMx
# ZM1ZAQChjU/oBVDlhrlfInGjOcdXlM4l0R0pDQZ6dm1NYVqcvgD/WRNWj+tQ2H1V
# xmKXSzrGlDyYFu1uozfU8kvYJeHaKgw=
# =eRrg
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 05 May 2022 01:47:02 PM CDT
# gpg:                using EDDSA key 17EBFF9923D01800AF2838193CD9CA96DE033164
# gpg: Good signature from "Daniel Henrique Barboza <danielhb413@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 17EB FF99 23D0 1800 AF28 3819 3CD9 CA96 DE03 3164

* tag 'pull-ppc-20220505' of https://gitlab.com/danielhb/qemu: (30 commits)
  target/ppc: Change MSR_* to follow POWER ISA numbering convention
  target/ppc: Add unused msr bits FIELDs
  target/ppc: Remove msr_de macro
  target/ppc: Remove msr_hv macro
  target/ppc: Remove msr_ts macro
  target/ppc: Remove msr_fe0 and msr_fe1 macros
  target/ppc: Remove msr_ep macro
  target/ppc: Remove msr_dr macro
  target/ppc: Remove msr_ir macro
  target/ppc: Remove msr_cm macro
  target/ppc: Remove msr_fp macro
  target/ppc: Remove msr_gs macro
  target/ppc: Remove msr_me macro
  target/ppc: Remove msr_pow macro
  target/ppc: Remove msr_ce macro
  target/ppc: Remove msr_ee macro
  target/ppc: Remove msr_ile macro
  target/ppc: Remove msr_ds macro
  target/ppc: Remove msr_le macro
  target/ppc: Remove msr_pr macro
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  hw/intc/xive.c            |  25
-rw-r--r--  hw/intc/xive2.c           |  18
-rw-r--r--  hw/ppc/pegasos2.c         |   2
-rw-r--r--  hw/ppc/spapr.c            |   2
-rw-r--r--  hw/virtio/vhost-user.c    |   2
-rw-r--r--  include/hw/ppc/xive.h     |   1
-rw-r--r--  target/ppc/cpu.c          |   2
-rw-r--r--  target/ppc/cpu.h          | 220
-rw-r--r--  target/ppc/cpu_init.c     |  23
-rw-r--r--  target/ppc/excp_helper.c  |  54
-rw-r--r--  target/ppc/fpu_helper.c   |  28
-rw-r--r--  target/ppc/gdbstub.c      |   2
-rw-r--r--  target/ppc/helper_regs.c  |  15
-rw-r--r--  target/ppc/kvm.c          |  16
-rw-r--r--  target/ppc/machine.c      |   2
-rw-r--r--  target/ppc/mem_helper.c   |  23
-rw-r--r--  target/ppc/misc_helper.c  |   2
-rw-r--r--  target/ppc/mmu-radix64.c  |  11
-rw-r--r--  target/ppc/mmu_common.c   |  40
-rw-r--r--  target/ppc/mmu_helper.c   |   6
20 files changed, 260 insertions, 234 deletions
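
Editorial note: most of the churn below mechanically replaces the old msr_* accessor
macros, which silently assumed a local variable named 'env', with explicit
FIELD_EX64(env->msr, MSR, ...) extractions built on hw/registerfields.h. A minimal,
standalone sketch of the two styles follows; the FIELD_EX64() definition here is a
simplified stand-in for the real header, not QEMU code.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_PR 14                       /* Problem state bit, as in cpu.h */
    #define R_MSR_PR_SHIFT MSR_PR
    #define R_MSR_PR_MASK  (1ULL << MSR_PR)
    /* Simplified stand-in for hw/registerfields.h */
    #define FIELD_EX64(storage, reg, fld) \
        (((storage) & R_##reg##_##fld##_MASK) >> R_##reg##_##fld##_SHIFT)

    typedef struct { uint64_t msr; } CPUPPCState;

    /* Old style: msr_pr hid a dependency on a variable called 'env'. */
    #define msr_pr ((env->msr >> MSR_PR) & 1)

    int main(void)
    {
        CPUPPCState state = { .msr = 1ULL << MSR_PR };
        CPUPPCState *env = &state;

        printf("old msr_pr               = %d\n", (int)msr_pr);
        printf("new FIELD_EX64(msr, PR)  = %d\n",
               (int)FIELD_EX64(env->msr, MSR, PR));
        return 0;
    }

Both expressions yield the same bit; the new form just makes the env->msr read explicit
at every call site.
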
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index b8e4c72..ae221fe 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -114,6 +114,17 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
}
}
+void xive_tctx_reset_os_signal(XiveTCTX *tctx)
+{
+ /*
+ * Lower the External interrupt. Used when pulling an OS
+ * context. It is necessary to avoid catching it in the hypervisor
+ * context. It should be raised again when re-pushing the OS
+ * context.
+ */
+ qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
+}
+
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
uint8_t *regs = &tctx->regs[ring];
@@ -388,6 +399,8 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
/* Invalidate CAM line */
qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
xive_tctx_set_os_cam(tctx, qw1w2_new);
+
+ xive_tctx_reset_os_signal(tctx);
return qw1w2;
}
@@ -413,10 +426,16 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
/* Reset the NVT value */
nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
-
- /* Merge in current context */
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}
+ /*
+ * Always call xive_tctx_ipb_update(). Even if there were no
+ * escalation triggered, there could be a pending interrupt which
+ * was saved when the context was pulled and that we need to take
+ * into account by recalculating the PIPR (which is not
+ * saved/restored).
+ * It will also raise the External interrupt signal if needed.
+ */
+ xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}
/*
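
Editorial note: the xive.c change above pairs two behaviours: lowering the OS output
line when the context is pulled, and unconditionally re-evaluating it (via
xive_tctx_ipb_update()) when the context is pushed back, because a pending interrupt
is saved with the context while the PIPR is not. A toy model of that reasoning,
explicitly not XIVE code:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool pending;      /* saved/restored with the context (like the IPB) */
        bool line_raised;  /* output signal, always recomputed (like the PIPR) */
    } ToyCtx;

    static void toy_pull(ToyCtx *c)
    {
        /* Lower the output so the hypervisor context does not catch it. */
        c->line_raised = false;
    }

    static void toy_push(ToyCtx *c)
    {
        /* Always re-evaluate: re-raise the line if something was pending. */
        c->line_raised = c->pending;
    }

    int main(void)
    {
        ToyCtx c = { .pending = true, .line_raised = true };
        toy_pull(&c);
        printf("after pull: line=%d\n", c.line_raised);
        toy_push(&c);
        printf("after push: line=%d\n", c.line_raised);
        return 0;
    }
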
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 3aff42a..4d9ff41 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -269,6 +269,7 @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
}
+ xive_tctx_reset_os_signal(tctx);
return qw1w2;
}
@@ -316,7 +317,6 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
{
Xive2Nvp nvp;
uint8_t ipb;
- uint8_t cppr = 0;
/*
* Grab the associated thread interrupt context registers in the
@@ -337,7 +337,7 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
/* Automatically restore thread context registers */
if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
do_restore) {
- cppr = xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
+ xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
}
ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
@@ -345,11 +345,15 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
}
-
- /* An IPB or CPPR change can trigger a resend */
- if (ipb || cppr) {
- xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
- }
+ /*
+ * Always call xive_tctx_ipb_update(). Even if there were no
+ * escalation triggered, there could be a pending interrupt which
+ * was saved when the context was pulled and that we need to take
+ * into account by recalculating the PIPR (which is not
+ * saved/restored).
+ * It will also raise the External interrupt signal if needed.
+ */
+ xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}
/*
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index 56bf203..9411ca6 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -461,7 +461,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
/* The TCG path should also be holding the BQL at this point */
g_assert(qemu_mutex_iothread_locked());
- if (msr_pr) {
+ if (FIELD_EX64(env->msr, MSR, PR)) {
qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");
env->gpr[3] = H_PRIVILEGE;
} else if (env->gpr[3] == KVMPPC_H_RTAS) {
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 2256930..fe9937e 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1269,7 +1269,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
g_assert(!vhyp_cpu_in_nested(cpu));
- if (msr_pr) {
+ if (FIELD_EX64(env->msr, MSR, PR)) {
hcall_dprintf("Hypercall made with MSR[PR]=1\n");
env->gpr[3] = H_PRIVILEGE;
} else {
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 2d434ff..afd51f7 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -51,7 +51,7 @@
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS
-#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
+#elif defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 126e4e2..f7eea4c 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -527,6 +527,7 @@ Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
+void xive_tctx_reset_os_signal(XiveTCTX *tctx);
/*
* KVM XIVE device helpers
diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
index d7b42ba..401b6f9 100644
--- a/target/ppc/cpu.c
+++ b/target/ppc/cpu.c
@@ -88,7 +88,7 @@ static inline void fpscr_set_rounding_mode(CPUPPCState *env)
int rnd_type;
/* Set rounding mode */
- switch (fpscr_rn) {
+ switch (env->fpscr & FP_RN) {
case 0:
/* Best approximation (round to nearest) */
rnd_type = float_round_nearest_even;
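
Editorial note: the fpscr_rn macro removed here (and later in fpu_helper.c) expanded to
((env->fpscr >> FPSCR_RN0) & 0x3). Since FPSCR_RN0 is bit 0, masking with FP_RN gives
the same 0-3 rounding-mode value, so the switch cases remain valid. A standalone check,
using the FPSCR_RN0/RN1 values from target/ppc/cpu.h:

    #include <assert.h>
    #include <stdint.h>

    #define FPSCR_RN1 1
    #define FPSCR_RN0 0
    #define FP_RN     ((1ULL << FPSCR_RN1) | (1ULL << FPSCR_RN0))

    int main(void)
    {
        for (uint64_t fpscr = 0; fpscr < 16; fpscr++) {
            uint64_t old_rn = (fpscr >> FPSCR_RN0) & 0x3;   /* removed macro  */
            uint64_t new_rn = fpscr & FP_RN;                /* new expression */
            assert(old_rn == new_rn);
        }
        return 0;
    }
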
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index c2b6c98..48596cf 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -25,6 +25,7 @@
#include "exec/cpu-defs.h"
#include "cpu-qom.h"
#include "qom/object.h"
+#include "hw/registerfields.h"
#define TCG_GUEST_DEFAULT_MO 0
@@ -37,6 +38,7 @@
#define PPC_ELF_MACHINE EM_PPC
#endif
+#define PPC_BIT_NR(bit) (63 - (bit))
#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
@@ -309,49 +311,106 @@ typedef enum {
/*****************************************************************************/
/* Machine state register bits definition */
-#define MSR_SF 63 /* Sixty-four-bit mode hflags */
-#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
-#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
-#define MSR_HV 60 /* hypervisor state hflags */
-#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
-#define MSR_TS1 33
-#define MSR_TM 32 /* Transactional Memory Available (Book3s) */
-#define MSR_CM 31 /* Computation mode for BookE hflags */
-#define MSR_ICM 30 /* Interrupt computation mode for BookE */
-#define MSR_GS 28 /* guest state for BookE */
-#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
-#define MSR_VR 25 /* altivec available x hflags */
-#define MSR_SPE 25 /* SPE enable for BookE x hflags */
-#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
-#define MSR_S 22 /* Secure state */
-#define MSR_KEY 19 /* key bit on 603e */
-#define MSR_POW 18 /* Power management */
-#define MSR_WE 18 /* Wait State Enable on 405 */
-#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
-#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
-#define MSR_ILE 16 /* Interrupt little-endian mode */
-#define MSR_EE 15 /* External interrupt enable */
-#define MSR_PR 14 /* Problem state hflags */
-#define MSR_FP 13 /* Floating point available hflags */
-#define MSR_ME 12 /* Machine check interrupt enable */
-#define MSR_FE0 11 /* Floating point exception mode 0 */
-#define MSR_SE 10 /* Single-step trace enable x hflags */
-#define MSR_DWE 10 /* Debug wait enable on 405 x */
-#define MSR_UBLE 10 /* User BTB lock enable on e500 x */
-#define MSR_BE 9 /* Branch trace enable x hflags */
-#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */
-#define MSR_FE1 8 /* Floating point exception mode 1 */
-#define MSR_AL 7 /* AL bit on POWER */
-#define MSR_EP 6 /* Exception prefix on 601 */
-#define MSR_IR 5 /* Instruction relocate */
-#define MSR_DR 4 /* Data relocate */
-#define MSR_IS 5 /* Instruction address space (BookE) */
-#define MSR_DS 4 /* Data address space (BookE) */
-#define MSR_PE 3 /* Protection enable on 403 */
-#define MSR_PX 2 /* Protection exclusive on 403 x */
-#define MSR_PMM 2 /* Performance monitor mark on POWER x */
-#define MSR_RI 1 /* Recoverable interrupt 1 */
-#define MSR_LE 0 /* Little-endian mode 1 hflags */
+#define MSR_SF PPC_BIT_NR(0) /* Sixty-four-bit mode hflags */
+#define MSR_TAG PPC_BIT_NR(1) /* Tag-active mode (POWERx ?) */
+#define MSR_ISF PPC_BIT_NR(2) /* Sixty-four-bit interrupt mode on 630 */
+#define MSR_HV PPC_BIT_NR(3) /* hypervisor state hflags */
+#define MSR_TS0 PPC_BIT_NR(29) /* Transactional state, 2 bits (Book3s) */
+#define MSR_TS1 PPC_BIT_NR(30)
+#define MSR_TM PPC_BIT_NR(31) /* Transactional Memory Available (Book3s) */
+#define MSR_CM PPC_BIT_NR(32) /* Computation mode for BookE hflags */
+#define MSR_ICM PPC_BIT_NR(33) /* Interrupt computation mode for BookE */
+#define MSR_GS PPC_BIT_NR(35) /* guest state for BookE */
+#define MSR_UCLE PPC_BIT_NR(37) /* User-mode cache lock enable for BookE */
+#define MSR_VR PPC_BIT_NR(38) /* altivec available x hflags */
+#define MSR_SPE PPC_BIT_NR(38) /* SPE enable for BookE x hflags */
+#define MSR_VSX PPC_BIT_NR(40) /* Vector Scalar Extension (>= 2.06)x hflags */
+#define MSR_S PPC_BIT_NR(41) /* Secure state */
+#define MSR_KEY PPC_BIT_NR(44) /* key bit on 603e */
+#define MSR_POW PPC_BIT_NR(45) /* Power management */
+#define MSR_WE PPC_BIT_NR(45) /* Wait State Enable on 405 */
+#define MSR_TGPR PPC_BIT_NR(46) /* TGPR usage on 602/603 x */
+#define MSR_CE PPC_BIT_NR(46) /* Critical int. enable on embedded PPC x */
+#define MSR_ILE PPC_BIT_NR(47) /* Interrupt little-endian mode */
+#define MSR_EE PPC_BIT_NR(48) /* External interrupt enable */
+#define MSR_PR PPC_BIT_NR(49) /* Problem state hflags */
+#define MSR_FP PPC_BIT_NR(50) /* Floating point available hflags */
+#define MSR_ME PPC_BIT_NR(51) /* Machine check interrupt enable */
+#define MSR_FE0 PPC_BIT_NR(52) /* Floating point exception mode 0 */
+#define MSR_SE PPC_BIT_NR(53) /* Single-step trace enable x hflags */
+#define MSR_DWE PPC_BIT_NR(53) /* Debug wait enable on 405 x */
+#define MSR_UBLE PPC_BIT_NR(53) /* User BTB lock enable on e500 x */
+#define MSR_BE PPC_BIT_NR(54) /* Branch trace enable x hflags */
+#define MSR_DE PPC_BIT_NR(54) /* Debug int. enable on embedded PPC x */
+#define MSR_FE1 PPC_BIT_NR(55) /* Floating point exception mode 1 */
+#define MSR_AL PPC_BIT_NR(56) /* AL bit on POWER */
+#define MSR_EP PPC_BIT_NR(57) /* Exception prefix on 601 */
+#define MSR_IR PPC_BIT_NR(58) /* Instruction relocate */
+#define MSR_IS PPC_BIT_NR(58) /* Instruction address space (BookE) */
+#define MSR_DR PPC_BIT_NR(59) /* Data relocate */
+#define MSR_DS PPC_BIT_NR(59) /* Data address space (BookE) */
+#define MSR_PE PPC_BIT_NR(60) /* Protection enable on 403 */
+#define MSR_PX PPC_BIT_NR(61) /* Protection exclusive on 403 x */
+#define MSR_PMM PPC_BIT_NR(61) /* Performance monitor mark on POWER x */
+#define MSR_RI PPC_BIT_NR(62) /* Recoverable interrupt 1 */
+#define MSR_LE PPC_BIT_NR(63) /* Little-endian mode 1 hflags */
+
+FIELD(MSR, SF, MSR_SF, 1)
+FIELD(MSR, TAG, MSR_TAG, 1)
+FIELD(MSR, ISF, MSR_ISF, 1)
+#if defined(TARGET_PPC64)
+FIELD(MSR, HV, MSR_HV, 1)
+#define FIELD_EX64_HV(storage) FIELD_EX64(storage, MSR, HV)
+#else
+#define FIELD_EX64_HV(storage) 0
+#endif
+FIELD(MSR, TS0, MSR_TS0, 1)
+FIELD(MSR, TS1, MSR_TS1, 1)
+FIELD(MSR, TS, MSR_TS0, 2)
+FIELD(MSR, TM, MSR_TM, 1)
+FIELD(MSR, CM, MSR_CM, 1)
+FIELD(MSR, ICM, MSR_ICM, 1)
+FIELD(MSR, GS, MSR_GS, 1)
+FIELD(MSR, UCLE, MSR_UCLE, 1)
+FIELD(MSR, VR, MSR_VR, 1)
+FIELD(MSR, SPE, MSR_SPE, 1)
+FIELD(MSR, VSX, MSR_VSX, 1)
+FIELD(MSR, S, MSR_S, 1)
+FIELD(MSR, KEY, MSR_KEY, 1)
+FIELD(MSR, POW, MSR_POW, 1)
+FIELD(MSR, WE, MSR_WE, 1)
+FIELD(MSR, TGPR, MSR_TGPR, 1)
+FIELD(MSR, CE, MSR_CE, 1)
+FIELD(MSR, ILE, MSR_ILE, 1)
+FIELD(MSR, EE, MSR_EE, 1)
+FIELD(MSR, PR, MSR_PR, 1)
+FIELD(MSR, FP, MSR_FP, 1)
+FIELD(MSR, ME, MSR_ME, 1)
+FIELD(MSR, FE0, MSR_FE0, 1)
+FIELD(MSR, SE, MSR_SE, 1)
+FIELD(MSR, DWE, MSR_DWE, 1)
+FIELD(MSR, UBLE, MSR_UBLE, 1)
+FIELD(MSR, BE, MSR_BE, 1)
+FIELD(MSR, DE, MSR_DE, 1)
+FIELD(MSR, FE1, MSR_FE1, 1)
+FIELD(MSR, AL, MSR_AL, 1)
+FIELD(MSR, EP, MSR_EP, 1)
+FIELD(MSR, IR, MSR_IR, 1)
+FIELD(MSR, DR, MSR_DR, 1)
+FIELD(MSR, IS, MSR_IS, 1)
+FIELD(MSR, DS, MSR_DS, 1)
+FIELD(MSR, PE, MSR_PE, 1)
+FIELD(MSR, PX, MSR_PX, 1)
+FIELD(MSR, PMM, MSR_PMM, 1)
+FIELD(MSR, RI, MSR_RI, 1)
+FIELD(MSR, LE, MSR_LE, 1)
+
+/*
+ * FE0 and FE1 bits are not side-by-side
+ * so we can't combine them using FIELD()
+ */
+#define FIELD_EX64_FE(msr) \
+ ((FIELD_EX64(msr, MSR, FE0) << 1) | FIELD_EX64(msr, MSR, FE1))
/* PMU bits */
#define MMCR0_FC PPC_BIT(32) /* Freeze Counters */
@@ -463,50 +522,6 @@ typedef enum {
#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */
#define HFSCR_IC_MSGP 0xA
-#define msr_sf ((env->msr >> MSR_SF) & 1)
-#define msr_isf ((env->msr >> MSR_ISF) & 1)
-#if defined(TARGET_PPC64)
-#define msr_hv ((env->msr >> MSR_HV) & 1)
-#else
-#define msr_hv (0)
-#endif
-#define msr_cm ((env->msr >> MSR_CM) & 1)
-#define msr_icm ((env->msr >> MSR_ICM) & 1)
-#define msr_gs ((env->msr >> MSR_GS) & 1)
-#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
-#define msr_vr ((env->msr >> MSR_VR) & 1)
-#define msr_spe ((env->msr >> MSR_SPE) & 1)
-#define msr_vsx ((env->msr >> MSR_VSX) & 1)
-#define msr_key ((env->msr >> MSR_KEY) & 1)
-#define msr_pow ((env->msr >> MSR_POW) & 1)
-#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
-#define msr_ce ((env->msr >> MSR_CE) & 1)
-#define msr_ile ((env->msr >> MSR_ILE) & 1)
-#define msr_ee ((env->msr >> MSR_EE) & 1)
-#define msr_pr ((env->msr >> MSR_PR) & 1)
-#define msr_fp ((env->msr >> MSR_FP) & 1)
-#define msr_me ((env->msr >> MSR_ME) & 1)
-#define msr_fe0 ((env->msr >> MSR_FE0) & 1)
-#define msr_se ((env->msr >> MSR_SE) & 1)
-#define msr_dwe ((env->msr >> MSR_DWE) & 1)
-#define msr_uble ((env->msr >> MSR_UBLE) & 1)
-#define msr_be ((env->msr >> MSR_BE) & 1)
-#define msr_de ((env->msr >> MSR_DE) & 1)
-#define msr_fe1 ((env->msr >> MSR_FE1) & 1)
-#define msr_al ((env->msr >> MSR_AL) & 1)
-#define msr_ep ((env->msr >> MSR_EP) & 1)
-#define msr_ir ((env->msr >> MSR_IR) & 1)
-#define msr_dr ((env->msr >> MSR_DR) & 1)
-#define msr_is ((env->msr >> MSR_IS) & 1)
-#define msr_ds ((env->msr >> MSR_DS) & 1)
-#define msr_pe ((env->msr >> MSR_PE) & 1)
-#define msr_px ((env->msr >> MSR_PX) & 1)
-#define msr_pmm ((env->msr >> MSR_PMM) & 1)
-#define msr_ri ((env->msr >> MSR_RI) & 1)
-#define msr_le ((env->msr >> MSR_LE) & 1)
-#define msr_ts ((env->msr >> MSR_TS1) & 3)
-#define msr_tm ((env->msr >> MSR_TM) & 1)
-
#define DBCR0_ICMP (1 << 27)
#define DBCR0_BRT (1 << 26)
#define DBSR_ICMP (1 << 27)
@@ -713,41 +728,12 @@ enum {
#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
#define FPSCR_RN1 1
#define FPSCR_RN0 0 /* Floating-point rounding control */
-#define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0)
-#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
-#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
-#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
-#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
-#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
-#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
-#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
-#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
-#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
-#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
-#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
-#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
-#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
-#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
-#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
-#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
-#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1)
-#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1)
-#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1)
-#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
-#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
-#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
-#define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3)
/* Invalid operation exception summary */
#define FPSCR_IX ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
(1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
(1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \
(1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \
(1 << FPSCR_VXCVI))
-/* exception summary */
-#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F)
-/* enabled exception summary */
-#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
- 0x1F)
#define FP_DRN2 (1ull << FPSCR_DRN2)
#define FP_DRN1 (1ull << FPSCR_DRN1)
@@ -2726,7 +2712,7 @@ static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu, bool hv)
} else if (pcc->lpcr_mask & LPCR_ILE) {
ile = !!(env->spr[SPR_LPCR] & LPCR_ILE);
} else {
- ile = !!(msr_ile);
+ ile = FIELD_EX64(env->msr, MSR, ILE);
}
return ile;
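
Editorial note: the cpu.h hunk above rewrites the MSR_* bit positions in POWER ISA
(MSB = bit 0) numbering via PPC_BIT_NR(), so the numeric values the rest of the code
relies on are unchanged. A standalone sanity check using values taken from the diff:

    #include <assert.h>

    #define PPC_BIT_NR(bit) (63 - (bit))

    int main(void)
    {
        assert(PPC_BIT_NR(49) == 14);   /* MSR_PR: was #define MSR_PR 14 */
        assert(PPC_BIT_NR(48) == 15);   /* MSR_EE: was 15 */
        assert(PPC_BIT_NR(63) == 0);    /* MSR_LE: was 0  */
        assert(PPC_BIT_NR(0)  == 63);   /* MSR_SF: was 63 */
        return 0;
    }

The FIELD() declarations then let FIELD_EX64() extract each bit, with FIELD_EX64_HV()
and FIELD_EX64_FE() as helpers for the HV bit (absent on 32-bit targets) and the
non-adjacent FE0/FE1 pair.
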
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index d42e2ba..d4c7813 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -5949,7 +5949,8 @@ static bool cpu_has_work_POWER7(CPUState *cs)
}
return false;
} else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ return FIELD_EX64(env->msr, MSR, EE) &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
}
@@ -6120,7 +6121,8 @@ static bool cpu_has_work_POWER8(CPUState *cs)
}
return false;
} else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ return FIELD_EX64(env->msr, MSR, EE) &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
}
@@ -6303,7 +6305,8 @@ static bool cpu_has_work_POWER9(CPUState *cs)
if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
(env->spr[SPR_LPCR] & LPCR_EEE)) {
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
- if (heic == 0 || !msr_hv || msr_pr) {
+ if (!heic || !FIELD_EX64_HV(env->msr) ||
+ FIELD_EX64(env->msr, MSR, PR)) {
return true;
}
}
@@ -6337,7 +6340,8 @@ static bool cpu_has_work_POWER9(CPUState *cs)
}
return false;
} else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ return FIELD_EX64(env->msr, MSR, EE) &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
}
@@ -6517,7 +6521,8 @@ static bool cpu_has_work_POWER10(CPUState *cs)
if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
(env->spr[SPR_LPCR] & LPCR_EEE)) {
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
- if (heic == 0 || !msr_hv || msr_pr) {
+ if (!heic || !FIELD_EX64_HV(env->msr) ||
+ FIELD_EX64(env->msr, MSR, PR)) {
return true;
}
}
@@ -6551,7 +6556,8 @@ static bool cpu_has_work_POWER10(CPUState *cs)
}
return false;
} else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ return FIELD_EX64(env->msr, MSR, EE) &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
}
@@ -7119,7 +7125,8 @@ static bool ppc_cpu_has_work(CPUState *cs)
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ return FIELD_EX64(env->msr, MSR, EE) &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
static void ppc_cpu_reset(DeviceState *dev)
@@ -7210,7 +7217,7 @@ static bool ppc_cpu_is_big_endian(CPUState *cs)
cpu_synchronize_state(cs);
- return !msr_le;
+ return !FIELD_EX64(env->msr, MSR, LE);
}
#ifdef CONFIG_TCG
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index d3e2cfc..cb752b1 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -444,7 +444,7 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
srr1 = SPR_40x_SRR3;
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -478,7 +478,7 @@ static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -575,7 +575,7 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_CRITICAL: /* Critical input */
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -615,7 +615,7 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -661,7 +661,7 @@ static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_ITLB: /* Instruction TLB error */
break;
case POWERPC_EXCP_RESET: /* System reset exception */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
cpu_abort(cs, "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
@@ -748,7 +748,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
switch (excp) {
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -788,7 +788,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -853,7 +853,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_DECR: /* Decrementer exception */
break;
case POWERPC_EXCP_RESET: /* System reset exception */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
cpu_abort(cs, "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
@@ -933,7 +933,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
switch (excp) {
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -973,7 +973,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -1038,7 +1038,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_DECR: /* Decrementer exception */
break;
case POWERPC_EXCP_RESET: /* System reset exception */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
cpu_abort(cs, "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
@@ -1128,7 +1128,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
srr1 = SPR_BOOKE_CSRR1;
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -1171,7 +1171,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -1248,7 +1248,7 @@ static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
break;
case POWERPC_EXCP_RESET: /* System reset exception */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
cpu_abort(cs, "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
@@ -1366,7 +1366,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
switch (excp) {
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
+ if (!FIELD_EX64(env->msr, MSR, ME)) {
/*
* Machine check exception is not enabled. Enter
* checkstop state.
@@ -1434,7 +1434,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
trace_ppc_excp_fp_ignore();
powerpc_reset_excp_state(cpu);
return;
@@ -1507,7 +1507,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
break;
case POWERPC_EXCP_RESET: /* System reset exception */
/* A power-saving exception sets ME, otherwise it is unchanged */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
/* indicate that we resumed from power save mode */
msr |= 0x10000;
new_msr |= ((target_ulong)1 << MSR_ME);
@@ -1519,7 +1519,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
*/
new_msr |= (target_ulong)MSR_HVB;
} else {
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
cpu_abort(cs, "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
@@ -1709,13 +1709,13 @@ static void ppc_hw_interrupt(CPUPPCState *env)
* clear when coming out of some power management states (in order
* for them to become a 0x100).
*/
- async_deliver = (msr_ee != 0) || env->resume_as_sreset;
+ async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
/* Hypervisor decrementer exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
- if ((async_deliver || msr_hv == 0) && hdice) {
+ if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
powerpc_excp(cpu, POWERPC_EXCP_HDECR);
@@ -1727,7 +1727,7 @@ static void ppc_hw_interrupt(CPUPPCState *env)
if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
/* LPCR will be clear when not supported so this will work */
bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
- if ((async_deliver || msr_hv == 0) && hvice) {
+ if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
return;
}
@@ -1738,8 +1738,9 @@ static void ppc_hw_interrupt(CPUPPCState *env)
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
- if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
- (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
+ if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
+ !FIELD_EX64(env->msr, MSR, PR))) ||
+ (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
if (books_vhyp_promotes_external_to_hvirt(cpu)) {
powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
} else {
@@ -1748,7 +1749,7 @@ static void ppc_hw_interrupt(CPUPPCState *env)
return;
}
}
- if (msr_ce != 0) {
+ if (FIELD_EX64(env->msr, MSR, CE)) {
/* External critical interrupt */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
@@ -1818,7 +1819,8 @@ static void ppc_hw_interrupt(CPUPPCState *env)
* EBB exception must be taken in problem state and
* with BESCR_GE set.
*/
- if (msr_pr == 1 && env->spr[SPR_BESCR] & BESCR_GE) {
+ if (FIELD_EX64(env->msr, MSR, PR) &&
+ (env->spr[SPR_BESCR] & BESCR_GE)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EBB);
if (env->spr[SPR_BESCR] & BESCR_PMEO) {
@@ -2094,7 +2096,7 @@ static void do_ebb(CPUPPCState *env, int ebb_excp)
env->spr[SPR_BESCR] |= BESCR_EEO;
}
- if (msr_pr == 1) {
+ if (FIELD_EX64(env->msr, MSR, PR)) {
powerpc_excp(cpu, ebb_excp);
} else {
env->pending_interrupts |= 1 << PPC_INTERRUPT_EBB;
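
Editorial note: in the excp_helper.c hunks above, '!FIELD_EX64_FE(env->msr)' replaces
the old '(msr_fe0 == 0 && msr_fe1 == 0)' test. A standalone check that the packed 2-bit
value is zero exactly when both bits are clear, using the MSR_FE0/FE1 positions from
target/ppc/cpu.h:

    #include <assert.h>
    #include <stdint.h>

    #define MSR_FE0 11
    #define MSR_FE1 8

    /* What FIELD_EX64_FE() computes: FE0 is the high bit, FE1 the low bit. */
    static unsigned fe_mode(uint64_t msr)
    {
        unsigned fe0 = (msr >> MSR_FE0) & 1;
        unsigned fe1 = (msr >> MSR_FE1) & 1;
        return (fe0 << 1) | fe1;
    }

    int main(void)
    {
        assert(fe_mode(0) == 0);                                     /* FP exceptions ignored */
        assert(fe_mode(1ULL << MSR_FE1) == 1);
        assert(fe_mode(1ULL << MSR_FE0) == 2);
        assert(fe_mode((1ULL << MSR_FE0) | (1ULL << MSR_FE1)) == 3); /* precise mode */
        return 0;
    }
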
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 99281cc..f6c8318 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -202,7 +202,7 @@ static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
env->fpscr |= FP_VX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ve != 0) {
+ if (env->fpscr & FP_VE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
if (fp_exceptions_enabled(env)) {
@@ -216,7 +216,7 @@ static void finish_invalid_op_arith(CPUPPCState *env, int op,
bool set_fpcc, uintptr_t retaddr)
{
env->fpscr &= ~(FP_FR | FP_FI);
- if (fpscr_ve == 0) {
+ if (!(env->fpscr & FP_VE)) {
if (set_fpcc) {
env->fpscr &= ~FP_FPCC;
env->fpscr |= (FP_C | FP_FU);
@@ -286,7 +286,7 @@ static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
/* We must update the target FPR before raising the exception */
- if (fpscr_ve != 0) {
+ if (env->fpscr & FP_VE) {
CPUState *cs = env_cpu(env);
cs->exception_index = POWERPC_EXCP_PROGRAM;
@@ -303,7 +303,7 @@ static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
{
env->fpscr |= FP_VXCVI;
env->fpscr &= ~(FP_FR | FP_FI);
- if (fpscr_ve == 0) {
+ if (!(env->fpscr & FP_VE)) {
if (set_fpcc) {
env->fpscr &= ~FP_FPCC;
env->fpscr |= (FP_C | FP_FU);
@@ -318,7 +318,7 @@ static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
env->fpscr &= ~(FP_FR | FP_FI);
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ze != 0) {
+ if (env->fpscr & FP_ZE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
if (fp_exceptions_enabled(env)) {
@@ -336,7 +336,7 @@ static inline void float_overflow_excp(CPUPPCState *env)
env->fpscr |= FP_OX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_oe != 0) {
+ if (env->fpscr & FP_OE) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
@@ -356,7 +356,7 @@ static inline void float_underflow_excp(CPUPPCState *env)
env->fpscr |= FP_UX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ue != 0) {
+ if (env->fpscr & FP_UE) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
@@ -374,7 +374,7 @@ static inline void float_inexact_excp(CPUPPCState *env)
env->fpscr |= FP_XX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_xe != 0) {
+ if (env->fpscr & FP_XE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
/* We must update the target FPR before raising the exception */
@@ -2274,7 +2274,7 @@ VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0)
vxvc = svxvc; \
if (flags & float_flag_invalid_snan) { \
float_invalid_op_vxsnan(env, GETPC()); \
- vxvc &= fpscr_ve == 0; \
+ vxvc &= !(env->fpscr & FP_VE); \
} \
if (vxvc) { \
float_invalid_op_vxvc(env, 0, GETPC()); \
@@ -2375,7 +2375,7 @@ static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
vxsnan_flag = true;
- if (fpscr_ve == 0 && ordered) {
+ if (!(env->fpscr & FP_VE) && ordered) {
vxvc_flag = true;
}
} else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
@@ -2440,7 +2440,7 @@ static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
float128_is_signaling_nan(xb->f128, &env->fp_status)) {
vxsnan_flag = true;
- if (fpscr_ve == 0 && ordered) {
+ if (!(env->fpscr & FP_VE) && ordered) {
vxvc_flag = true;
}
} else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
@@ -2590,7 +2590,7 @@ void helper_##name(CPUPPCState *env, \
t.VsrD(0) = xb->VsrD(0); \
} \
\
- vex_flag = fpscr_ve & vxsnan_flag; \
+ vex_flag = (env->fpscr & FP_VE) && vxsnan_flag; \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
@@ -3320,7 +3320,7 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
if (r == 0 && rmc == 0) {
rmode = float_round_ties_away;
} else if (r == 0 && rmc == 0x3) {
- rmode = fpscr_rn;
+ rmode = env->fpscr & FP_RN;
} else if (r == 1) {
switch (rmc) {
case 0:
@@ -3374,7 +3374,7 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
if (r == 0 && rmc == 0) {
rmode = float_round_ties_away;
} else if (r == 0 && rmc == 0x3) {
- rmode = fpscr_rn;
+ rmode = env->fpscr & FP_RN;
} else if (r == 1) {
switch (rmc) {
case 0:
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
index 1252429..1a0b9ca 100644
--- a/target/ppc/gdbstub.c
+++ b/target/ppc/gdbstub.c
@@ -95,7 +95,7 @@ static int ppc_gdb_register_len(int n)
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
- if (!msr_le) {
+ if (!FIELD_EX64(env->msr, MSR, LE)) {
/* do nothing */
} else if (len == 4) {
bswap32s((uint32_t *)mem_buf);
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 9a691d6..6159a15 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -63,10 +63,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
if (ppc_flags & POWERPC_FLAG_DE) {
target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
- if (dbcr0 & DBCR0_ICMP) {
+ if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) {
hflags |= 1 << HFLAGS_SE;
}
- if (dbcr0 & DBCR0_BRT) {
+ if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) {
hflags |= 1 << HFLAGS_BE;
}
} else {
@@ -227,13 +227,12 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
value &= ~MSR_HVB;
value |= env->msr & MSR_HVB;
}
- if (((value >> MSR_IR) & 1) != msr_ir ||
- ((value >> MSR_DR) & 1) != msr_dr) {
+ if ((value ^ env->msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) {
cpu_interrupt_exittb(cs);
}
if ((env->mmu_model == POWERPC_MMU_BOOKE ||
env->mmu_model == POWERPC_MMU_BOOKE206) &&
- ((value >> MSR_GS) & 1) != msr_gs) {
+ ((value ^ env->msr) & R_MSR_GS_MASK)) {
cpu_interrupt_exittb(cs);
}
if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
@@ -241,8 +240,8 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
/* Swap temporary saved registers with GPRs */
hreg_swap_gpr_tgpr(env);
}
- if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
- env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+ if (unlikely((value ^ env->msr) & R_MSR_EP_MASK)) {
+ env->excp_prefix = FIELD_EX64(value, MSR, EP) * 0xFFF00000;
}
/*
* If PR=1 then EE, IR and DR must be 1
@@ -261,7 +260,7 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
env->msr = value;
hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
- if (unlikely(msr_pow == 1)) {
+ if (unlikely(FIELD_EX64(env->msr, MSR, POW))) {
if (!env->pending_interrupts && (*env->check_pow)(env)) {
cs->halted = 1;
excp = EXCP_HALTED;
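
Editorial note: hreg_store_msr() above now detects IR/DR (and GS, EP) transitions by
XORing the old and new MSR values and masking the relevant R_MSR_*_MASK bits, instead
of comparing each extracted bit. A standalone sketch of that idiom, with the IR/DR
positions from target/ppc/cpu.h:

    #include <assert.h>
    #include <stdint.h>

    #define MSR_IR 5
    #define MSR_DR 4
    #define R_MSR_IR_MASK (1ULL << MSR_IR)
    #define R_MSR_DR_MASK (1ULL << MSR_DR)

    /* Non-zero exactly when IR or DR differs between the two MSR values. */
    static int translation_mode_changed(uint64_t old_msr, uint64_t new_msr)
    {
        return ((old_msr ^ new_msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) != 0;
    }

    int main(void)
    {
        assert(!translation_mode_changed(0, 0));
        assert(translation_mode_changed(0, R_MSR_IR_MASK));           /* IR set   */
        assert(translation_mode_changed(R_MSR_DR_MASK, 0));           /* DR clear */
        assert(!translation_mode_changed(R_MSR_IR_MASK, R_MSR_IR_MASK));
        return 0;
    }
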
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index a3c31b4..6eed466 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -266,7 +266,7 @@ struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
KVMState *s = KVM_STATE(current_accel());
struct ppc_radix_page_info *radix_page_info;
- struct kvm_ppc_rmmu_info rmmu_info;
+ struct kvm_ppc_rmmu_info rmmu_info = { };
int i;
if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
@@ -542,10 +542,11 @@ static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
+ /* Init 'val' to avoid "uninitialised value" Valgrind warnings */
union {
uint32_t u32;
uint64_t u64;
- } val;
+ } val = { };
struct kvm_one_reg reg = {
.id = id,
.addr = (uintptr_t) &val,
@@ -849,7 +850,7 @@ static int kvm_put_vpa(CPUState *cs)
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
- struct kvm_sregs sregs;
+ struct kvm_sregs sregs = { };
int i;
sregs.pvr = env->spr[SPR_PVR];
@@ -973,7 +974,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
#ifdef TARGET_PPC64
- if (msr_ts) {
+ if (FIELD_EX64(env->msr, MSR, TS)) {
for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
}
@@ -1281,7 +1282,7 @@ int kvm_arch_get_registers(CPUState *cs)
}
#ifdef TARGET_PPC64
- if (msr_ts) {
+ if (FIELD_EX64(env->msr, MSR, TS)) {
for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
}
@@ -1351,7 +1352,8 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ FIELD_EX64(env->msr, MSR, EE)) {
cs->halted = 1;
cs->exception_index = EXCP_HLT;
}
@@ -2536,7 +2538,7 @@ int kvmppc_get_cap_large_decr(void)
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
CPUState *cs = CPU(cpu);
- uint64_t lpcr;
+ uint64_t lpcr = 0;
kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
/* Do we need to modify the LPCR? */
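
Editorial note: the kvm.c hunks above are the valgrind fixes mentioned in the cover
letter: stack objects handed to the KVM one-reg/sregs paths are zero-initialized with
'= { }' so that bytes the kernel leaves untouched are still defined when read back.
A minimal illustration of the pattern, not QEMU code:

    #include <stdint.h>
    #include <stdio.h>

    struct reg_buf {
        uint32_t u32;
        uint64_t u64;
    };

    /* Stand-in for a helper that may fill only part of the buffer. */
    static void partial_fill(struct reg_buf *b)
    {
        b->u32 = 0x1234;            /* u64 intentionally left untouched */
    }

    int main(void)
    {
        struct reg_buf val = { };   /* was: 'struct reg_buf val;' (uninitialized) */

        partial_fill(&val);
        printf("u32=%#x u64=%llu\n", val.u32, (unsigned long long)val.u64);
        return 0;
    }
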
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index e673944..7104a5c 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -417,7 +417,7 @@ static bool tm_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUPPCState *env = &cpu->env;
- return msr_ts;
+ return FIELD_EX64(env->msr, MSR, TS);
}
static const VMStateDescription vmstate_tm = {
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index c4ff8fd..d1163f3 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -33,9 +33,9 @@
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if TARGET_BIG_ENDIAN
- return msr_le;
+ return FIELD_EX64(env->msr, MSR, LE);
#else
- return !msr_le;
+ return !FIELD_EX64(env->msr, MSR, LE);
#endif
}
@@ -470,8 +470,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
#endif
/*
- * We use msr_le to determine index ordering in a vector. However,
- * byteswapping is not simply controlled by msr_le. We also need to
+ * We use MSR_LE to determine index ordering in a vector. However,
+ * byteswapping is not simply controlled by MSR_LE. We also need to
* take into account endianness of the target. This is done for the
* little-endian PPC64 user-mode target.
*/
@@ -484,7 +484,7 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
- if (msr_le) { \
+ if (FIELD_EX64(env->msr, MSR, LE)) { \
index = n_elems - index - 1; \
} \
\
@@ -511,7 +511,7 @@ LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
- if (msr_le) { \
+ if (FIELD_EX64(env->msr, MSR, LE)) { \
index = n_elems - index - 1; \
} \
\
@@ -545,7 +545,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
t.s128 = int128_zero(); \
if (nb) { \
nb = (nb >= 16) ? 16 : nb; \
- if (msr_le && !lj) { \
+ if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
@@ -576,7 +576,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
} \
\
nb = (nb >= 16) ? 16 : nb; \
- if (msr_le && !lj) { \
+ if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
addr = addr_add(env, addr, 1); \
@@ -612,11 +612,12 @@ void helper_tbegin(CPUPPCState *env)
env->spr[SPR_TEXASR] =
(1ULL << TEXASR_FAILURE_PERSISTENT) |
(1ULL << TEXASR_NESTING_OVERFLOW) |
- (msr_hv << TEXASR_PRIVILEGE_HV) |
- (msr_pr << TEXASR_PRIVILEGE_PR) |
+ (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) |
+ (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) |
(1ULL << TEXASR_FAILURE_SUMMARY) |
(1ULL << TEXASR_TFIAR_EXACT);
- env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
+ env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) |
+ FIELD_EX64(env->msr, MSR, PR);
env->spr[SPR_TFHAR] = env->nip + 4;
env->crf[0] = 0xB; /* 0b1010 = transaction failure */
}
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index 06aa716..b0a5e7c 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -73,7 +73,7 @@ void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
- if ((env->msr_mask & MSR_HVB) && !msr_hv &&
+ if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
!(env->spr[SPR_HFSCR] & (1UL << bit))) {
raise_hv_fu_exception(env, bit, caller, cause, GETPC());
}
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 5414fd6..21ac958 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -37,7 +37,7 @@ static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
return false;
}
- if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
+ if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
switch (eaddr & R_EADDR_QUADRANT) {
case R_EADDR_QUADRANT0:
*lpid = 0;
@@ -191,12 +191,13 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
}
/* Determine permissions allowed by Encoded Access Authority */
- if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
+ if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
+ FIELD_EX64(env->msr, MSR, PR)) {
*prot = 0;
} else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
partition_scoped) {
*prot = ppc_radix64_get_prot_eaa(pte);
- } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
+ } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
*prot = ppc_radix64_get_prot_eaa(pte);
*prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
}
@@ -305,7 +306,7 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
if (!(pate->dw0 & PATE0_HR)) {
return false;
}
- if (lpid == 0 && !msr_hv) {
+ if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
return false;
}
if ((pate->dw0 & PATE1_R_PRTS) < 5) {
@@ -430,7 +431,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
*g_page_size = PRTBE_R_GET_RTS(prtbe0);
base_addr = prtbe0 & PRTBE_R_RPDB;
nls = prtbe0 & PRTBE_R_RPDS;
- if (msr_hv || vhyp_flat_addressing(cpu)) {
+ if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
/*
* Can treat process table addresses as real addresses
*/
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index e9c5b14..89107a6 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -273,8 +273,8 @@ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
bl = (*BATu & 0x00001FFC) << 15;
valid = 0;
prot = 0;
- if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
- ((msr_pr != 0) && (*BATu & 0x00000001))) {
+ if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
+ (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
valid = 1;
pp = *BATl & 0x00000003;
if (pp != 0) {
@@ -368,16 +368,17 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
PowerPCCPU *cpu = env_archcpu(env);
hwaddr hash;
target_ulong vsid;
- int ds, pr, target_page_bits;
+ int ds, target_page_bits;
+ bool pr;
int ret;
target_ulong sr, pgidx;
- pr = msr_pr;
+ pr = FIELD_EX64(env->msr, MSR, PR);
ctx->eaddr = eaddr;
sr = env->sr[eaddr >> 28];
- ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
- ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
+ ctx->key = (((sr & 0x20000000) && pr) ||
+ ((sr & 0x40000000) && !pr)) ? 1 : 0;
ds = sr & 0x80000000 ? 1 : 0;
ctx->nx = sr & 0x10000000 ? 1 : 0;
vsid = sr & 0x00FFFFFF;
@@ -386,8 +387,9 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
"Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
" nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
" ir=%d dr=%d pr=%d %d t=%d\n",
- eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
- (int)msr_dr, pr != 0 ? 1 : 0,
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
+ (int)FIELD_EX64(env->msr, MSR, IR),
+ (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
access_type == MMU_DATA_STORE, type);
pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
hash = vsid ^ pgidx;
@@ -530,7 +532,7 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
ret = -1;
raddr = (hwaddr)-1ULL;
- pr = msr_pr;
+ pr = FIELD_EX64(env->msr, MSR, PR);
for (i = 0; i < env->nb_tlb; i++) {
tlb = &env->tlb.tlbe[i];
if (ppcemb_tlb_check(env, tlb, &raddr, address,
@@ -618,14 +620,16 @@ static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
found_tlb:
- if (msr_pr != 0) {
+ if (FIELD_EX64(env->msr, MSR, PR)) {
prot2 = tlb->prot & 0xF;
} else {
prot2 = (tlb->prot >> 4) & 0xF;
}
/* Check the address space */
- if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
+ if ((access_type == MMU_INST_FETCH ?
+ FIELD_EX64(env->msr, MSR, IR) :
+ FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
return -1;
}
@@ -691,7 +695,7 @@ int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
hwaddr mask;
uint32_t tlb_pid;
- if (!msr_cm) {
+ if (!FIELD_EX64(env->msr, MSR, CM)) {
/* In 32bit mode we can only address 32bit EAs */
address = (uint32_t)address;
}
@@ -767,8 +771,8 @@ static bool mmubooke206_get_as(CPUPPCState *env,
*pr_out = !!(epidr & EPID_EPR);
return true;
} else {
- *as_out = msr_ds;
- *pr_out = msr_pr;
+ *as_out = FIELD_EX64(env->msr, MSR, DS);
+ *pr_out = FIELD_EX64(env->msr, MSR, PR);
return false;
}
}
@@ -838,7 +842,7 @@ found_tlb:
if (access_type == MMU_INST_FETCH) {
/* There is no way to fetch code using epid load */
assert(!use_epid);
- as = msr_ir;
+ as = FIELD_EX64(env->msr, MSR, IR);
}
if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
@@ -1168,8 +1172,8 @@ int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
int mmu_idx)
{
int ret = -1;
- bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
- || (type != ACCESS_CODE && msr_dr == 0);
+ bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) ||
+ (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR));
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
@@ -1230,7 +1234,7 @@ static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
if (access_type == MMU_INST_FETCH) {
- as = msr_ir;
+ as = FIELD_EX64(env->msr, MSR, IR);
}
env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 142a717..15239dc 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -935,7 +935,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
- !msr_gs) {
+ !FIELD_EX64(env->msr, MSR, GS)) {
/* XXX we don't support direct LRAT setting yet */
fprintf(stderr, "cpu: don't support LRAT setting yet\n");
return;
@@ -962,7 +962,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
POWERPC_EXCP_INVAL_INVAL, GETPC());
}
- if (msr_gs) {
+ if (FIELD_EX64(env->msr, MSR, GS)) {
cpu_abort(env_cpu(env), "missing HV implementation\n");
}
@@ -1003,7 +1003,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
/* Add a mask for page attributes */
mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
- if (!msr_cm) {
+ if (!FIELD_EX64(env->msr, MSR, CM)) {
/*
* Executing a tlbwe instruction in 32-bit mode will set bits
* 0:31 of the TLB EPN field to zero.