author    Peter Maydell <peter.maydell@linaro.org>  2019-04-27 21:34:46 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2019-04-27 21:34:46 +0100
commit    9ec34ecc97bcd5df04b0f67a774d79ffcd6b0a11 (patch)
tree      6cf993ec0d442d43c116a42e69e3382a9f797b5a /target/ppc
parent    db7f1c3fafa8e1d23ecb212454f9d83ac59e411b (diff)
parent    aaef873b130f4f9c78f8e97b69c235c81b8b8b88 (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190426' into staging
ppc patch queue 2019-04-26

Here's the first ppc target pull request for qemu-4.1. This has a
number of things that have accumulated while qemu-4.0 was frozen.

 * A number of emulated MMU improvements from Ben Herrenschmidt
 * Assorted cleanups from Greg Kurz
 * A large set of mostly mechanical cleanups from me to make target/ppc
   much closer to compliant with the modern coding style
 * Support for passthrough of NVIDIA GPUs using NVLink2

As well as some other assorted fixes.

# gpg: Signature made Fri 26 Apr 2019 07:02:19 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190426: (36 commits)
  target/ppc: improve performance of large BAT invalidations
  ppc/hash32: Rework R and C bit updates
  ppc/hash64: Rework R and C bit updates
  ppc/spapr: Use proper HPTE accessors for H_READ
  target/ppc: Don't check UPRT in radix mode when in HV real mode
  target/ppc/kvm: Convert DPRINTF to traces
  target/ppc/trace-events: Fix trivial typo
  spapr: Drop duplicate PCI swizzle code
  spapr_pci: Get rid of duplicate code for node name creation
  target/ppc: Style fixes for translate/spe-impl.inc.c
  target/ppc: Style fixes for translate/vmx-impl.inc.c
  target/ppc: Style fixes for translate/vsx-impl.inc.c
  target/ppc: Style fixes for translate/fp-impl.inc.c
  target/ppc: Style fixes for translate.c
  target/ppc: Style fixes for translate_init.inc.c
  target/ppc: Style fixes for monitor.c
  target/ppc: Style fixes for mmu_helper.c
  target/ppc: Style fixes for mmu-hash64.[ch]
  target/ppc: Style fixes for mmu-hash32.[ch]
  target/ppc: Style fixes for misc_helper.c
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/ppc')
-rw-r--r--  target/ppc/cpu-models.c                 2
-rw-r--r--  target/ppc/cpu-models.h                 3
-rw-r--r--  target/ppc/cpu.h                      241
-rw-r--r--  target/ppc/dfp_helper.c                14
-rw-r--r--  target/ppc/excp_helper.c               87
-rw-r--r--  target/ppc/fpu_helper.c               134
-rw-r--r--  target/ppc/gdbstub.c                   34
-rw-r--r--  target/ppc/helper_regs.h               10
-rw-r--r--  target/ppc/int_helper.c                70
-rw-r--r--  target/ppc/kvm.c                      244
-rw-r--r--  target/ppc/kvm_ppc.h                    3
-rw-r--r--  target/ppc/machine.c                  106
-rw-r--r--  target/ppc/mem_helper.c                33
-rw-r--r--  target/ppc/mfrom_table.inc.c            3
-rw-r--r--  target/ppc/mfrom_table_gen.c            8
-rw-r--r--  target/ppc/misc_helper.c                9
-rw-r--r--  target/ppc/mmu-hash32.c                59
-rw-r--r--  target/ppc/mmu-hash64.c               136
-rw-r--r--  target/ppc/mmu-hash64.h                 2
-rw-r--r--  target/ppc/mmu-radix64.c               16
-rw-r--r--  target/ppc/mmu_helper.c               144
-rw-r--r--  target/ppc/monitor.c                   11
-rw-r--r--  target/ppc/trace-events                29
-rw-r--r--  target/ppc/translate.c                504
-rw-r--r--  target/ppc/translate/fp-impl.inc.c     52
-rw-r--r--  target/ppc/translate/spe-impl.inc.c    14
-rw-r--r--  target/ppc/translate/vmx-impl.inc.c    26
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c    15
-rw-r--r--  target/ppc/translate_init.inc.c       243
29 files changed, 1361 insertions, 891 deletions
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index 7c75963..9d7050b 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -740,7 +740,7 @@
POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455,
"PowerPC 7457A v1.2 (G4)")
/* 64 bits PowerPC */
-#if defined (TARGET_PPC64)
+#if defined(TARGET_PPC64)
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index efdb2fa..4fdb730 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -393,7 +393,8 @@ enum {
CPU_POWERPC_RS64IV = 0x00370000,
#endif /* defined(TARGET_PPC64) */
/* Original POWER */
- /* XXX: should be POWER (RIOS), RSC3308, RSC4608,
+ /*
+ * XXX: should be POWER (RIOS), RSC3308, RSC4608,
* POWER2 (RIOS2) & RSC2 (P2SC) here
*/
/* PA Semi core */
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index d5259f7..5e7cf54 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -23,23 +23,28 @@
#include "qemu-common.h"
#include "qemu/int128.h"
-//#define PPC_EMULATE_32BITS_HYPV
+/* #define PPC_EMULATE_32BITS_HYPV */
-#if defined (TARGET_PPC64)
+#if defined(TARGET_PPC64)
/* PowerPC 64 definitions */
#define TARGET_LONG_BITS 64
#define TARGET_PAGE_BITS 12
#define TCG_GUEST_DEFAULT_MO 0
-/* Note that the official physical address space bits is 62-M where M
- is implementation dependent. I've not looked up M for the set of
- cpus we emulate at the system level. */
+/*
+ * Note that the official physical address space is 62-M bits, where M
+ * is implementation dependent. I've not looked up M for the set of
+ * cpus we emulate at the system level.
+ */
#define TARGET_PHYS_ADDR_SPACE_BITS 62
-/* Note that the PPC environment architecture talks about 80 bit virtual
- addresses, with segmentation. Obviously that's not all visible to a
- single process, which is all we're concerned with here. */
+/*
+ * Note that the PPC environment architecture talks about 80 bit
+ * virtual addresses, with segmentation. Obviously that's not all
+ * visible to a single process, which is all we're concerned with
+ * here.
+ */
#ifdef TARGET_ABI32
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#else
@@ -49,7 +54,7 @@
#define TARGET_PAGE_BITS_64K 16
#define TARGET_PAGE_BITS_16M 24
-#else /* defined (TARGET_PPC64) */
+#else /* defined(TARGET_PPC64) */
/* PowerPC 32 definitions */
#define TARGET_LONG_BITS 32
#define TARGET_PAGE_BITS 12
@@ -57,14 +62,14 @@
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#endif /* defined (TARGET_PPC64) */
+#endif /* defined(TARGET_PPC64) */
#define CPUArchState struct CPUPPCState
#include "exec/cpu-defs.h"
#include "cpu-qom.h"
-#if defined (TARGET_PPC64)
+#if defined(TARGET_PPC64)
#define PPC_ELF_MACHINE EM_PPC64
#else
#define PPC_ELF_MACHINE EM_PPC
@@ -237,9 +242,11 @@ struct ppc_spr_t {
const char *name;
target_ulong default_value;
#ifdef CONFIG_KVM
- /* We (ab)use the fact that all the SPRs will have ids for the
+ /*
+ * We (ab)use the fact that all valid SPR ids for the ONE_REG
 * interface have KVM_REG_PPC set, so we can use an id of 0 to mean:
- * don't sync this */
+ * don't sync this
+ */
uint64_t one_reg_id;
#endif
};
@@ -656,39 +663,39 @@ enum {
#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
0x1F)
-#define FP_FX (1ull << FPSCR_FX)
-#define FP_FEX (1ull << FPSCR_FEX)
-#define FP_VX (1ull << FPSCR_VX)
-#define FP_OX (1ull << FPSCR_OX)
-#define FP_UX (1ull << FPSCR_UX)
-#define FP_ZX (1ull << FPSCR_ZX)
-#define FP_XX (1ull << FPSCR_XX)
-#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
-#define FP_VXISI (1ull << FPSCR_VXISI)
-#define FP_VXIDI (1ull << FPSCR_VXIDI)
-#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
-#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
-#define FP_VXVC (1ull << FPSCR_VXVC)
-#define FP_FR (1ull << FSPCR_FR)
-#define FP_FI (1ull << FPSCR_FI)
-#define FP_C (1ull << FPSCR_C)
-#define FP_FL (1ull << FPSCR_FL)
-#define FP_FG (1ull << FPSCR_FG)
-#define FP_FE (1ull << FPSCR_FE)
-#define FP_FU (1ull << FPSCR_FU)
-#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
-#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
-#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
-#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
-#define FP_VXCVI (1ull << FPSCR_VXCVI)
-#define FP_VE (1ull << FPSCR_VE)
-#define FP_OE (1ull << FPSCR_OE)
-#define FP_UE (1ull << FPSCR_UE)
-#define FP_ZE (1ull << FPSCR_ZE)
-#define FP_XE (1ull << FPSCR_XE)
-#define FP_NI (1ull << FPSCR_NI)
-#define FP_RN1 (1ull << FPSCR_RN1)
-#define FP_RN (1ull << FPSCR_RN)
+#define FP_FX (1ull << FPSCR_FX)
+#define FP_FEX (1ull << FPSCR_FEX)
+#define FP_VX (1ull << FPSCR_VX)
+#define FP_OX (1ull << FPSCR_OX)
+#define FP_UX (1ull << FPSCR_UX)
+#define FP_ZX (1ull << FPSCR_ZX)
+#define FP_XX (1ull << FPSCR_XX)
+#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
+#define FP_VXISI (1ull << FPSCR_VXISI)
+#define FP_VXIDI (1ull << FPSCR_VXIDI)
+#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
+#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
+#define FP_VXVC (1ull << FPSCR_VXVC)
+#define FP_FR (1ull << FSPCR_FR)
+#define FP_FI (1ull << FPSCR_FI)
+#define FP_C (1ull << FPSCR_C)
+#define FP_FL (1ull << FPSCR_FL)
+#define FP_FG (1ull << FPSCR_FG)
+#define FP_FE (1ull << FPSCR_FE)
+#define FP_FU (1ull << FPSCR_FU)
+#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
+#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
+#define FP_VXCVI (1ull << FPSCR_VXCVI)
+#define FP_VE (1ull << FPSCR_VE)
+#define FP_OE (1ull << FPSCR_OE)
+#define FP_UE (1ull << FPSCR_UE)
+#define FP_ZE (1ull << FPSCR_ZE)
+#define FP_XE (1ull << FPSCR_XE)
+#define FP_NI (1ull << FPSCR_NI)
+#define FP_RN1 (1ull << FPSCR_RN1)
+#define FP_RN (1ull << FPSCR_RN)
/* the exception bits which can be cleared by mcrfs - includes FX */
#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
@@ -698,8 +705,8 @@ enum {
/*****************************************************************************/
/* Vector status and control register */
-#define VSCR_NJ 16 /* Vector non-java */
-#define VSCR_SAT 0 /* Vector saturation */
+#define VSCR_NJ 16 /* Vector non-java */
+#define VSCR_SAT 0 /* Vector saturation */
/*****************************************************************************/
/* BookE e500 MMU registers */
@@ -962,9 +969,10 @@ struct ppc_radix_page_info {
/*****************************************************************************/
/* The whole PowerPC CPU context */
-/* PowerPC needs eight modes for different hypervisor/supervisor/guest +
- * real/paged mode combinations. The other two modes are for external PID
- * load/store.
+/*
+ * PowerPC needs eight modes for different hypervisor/supervisor/guest
+ * + real/paged mode combinations. The other two modes are for
+ * external PID load/store.
*/
#define NB_MMU_MODES 10
#define MMU_MODE8_SUFFIX _epl
@@ -976,8 +984,9 @@ struct ppc_radix_page_info {
#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
struct CPUPPCState {
- /* First are the most commonly used resources
- * during translated code execution
+ /*
+ * First are the most commonly used resources during translated
+ * code execution
*/
/* general purpose registers */
target_ulong gpr[32];
@@ -1023,8 +1032,8 @@ struct CPUPPCState {
/* High part of 128-bit helper return. */
uint64_t retxh;
- int access_type; /* when a memory exception occurs, the access
- type is stored here */
+ /* when a memory exception occurs, the access type is stored here */
+ int access_type;
CPU_COMMON
@@ -1072,8 +1081,10 @@ struct CPUPPCState {
/* SPE registers */
uint64_t spe_acc;
uint32_t spe_fscr;
- /* SPE and Altivec can share a status since they will never be used
- * simultaneously */
+ /*
+ * SPE and Altivec can share a status since they will never be
+ * used simultaneously
+ */
float_status vec_status;
/* Internal devices resources */
@@ -1103,7 +1114,8 @@ struct CPUPPCState {
int error_code;
uint32_t pending_interrupts;
#if !defined(CONFIG_USER_ONLY)
- /* This is the IRQ controller, which is implementation dependent
+ /*
+ * This is the IRQ controller, which is implementation dependent
* and only relevant when emulating a complete machine.
*/
uint32_t irq_input_state;
@@ -1117,7 +1129,8 @@ struct CPUPPCState {
hwaddr mpic_iack;
/* true when the external proxy facility mode is enabled */
bool mpic_proxy;
- /* set when the processor has an HV mode, thus HV priv
+ /*
+ * set when the processor has an HV mode, thus HV priv
* instructions and SPRs are disallowed if MSR:HV is 0
*/
bool has_hv_mode;
@@ -1149,8 +1162,10 @@ struct CPUPPCState {
/* booke timers */
- /* Specifies bit locations of the Time Base used to signal a fixed timer
- * exception on a transition from 0 to 1. (watchdog or fixed-interval timer)
+ /*
+ * Specifies bit locations of the Time Base used to signal a fixed
+ * timer exception on a transition from 0 to 1. (watchdog or
+ * fixed-interval timer)
*
* 0 selects the least significant bit.
* 63 selects the most significant bit.
@@ -1250,8 +1265,8 @@ struct PPCVirtualHypervisorClass {
void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n);
- void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex,
- uint64_t pte0, uint64_t pte1);
+ void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
+ void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
};
@@ -1290,53 +1305,54 @@ extern const struct VMStateDescription vmstate_ppc_cpu;
/*****************************************************************************/
void ppc_translate_init(void);
-/* you can call this signal handler from your SIGBUS and SIGSEGV
- signal handlers to inform the virtual CPU of exceptions. non zero
- is returned if the signal was handled by the virtual CPU. */
-int cpu_ppc_signal_handler (int host_signum, void *pinfo,
- void *puc);
+/*
+ * you can call this signal handler from your SIGBUS and SIGSEGV
+ * signal handlers to inform the virtual CPU of exceptions. non zero
+ * is returned if the signal was handled by the virtual CPU.
+ */
+int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc);
#if defined(CONFIG_USER_ONLY)
int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#endif
#if !defined(CONFIG_USER_ONLY)
-void ppc_store_sdr1 (CPUPPCState *env, target_ulong value);
+void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
void ppc_store_ptcr(CPUPPCState *env, target_ulong value);
#endif /* !defined(CONFIG_USER_ONLY) */
-void ppc_store_msr (CPUPPCState *env, target_ulong value);
+void ppc_store_msr(CPUPPCState *env, target_ulong value);
void ppc_cpu_list(void);
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
-uint64_t cpu_ppc_load_tbl (CPUPPCState *env);
-uint32_t cpu_ppc_load_tbu (CPUPPCState *env);
-void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
-void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
-uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
-uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
-void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
-void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
+uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
+void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_atbl(CPUPPCState *env);
+uint32_t cpu_ppc_load_atbu(CPUPPCState *env);
+void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value);
bool ppc_decr_clear_on_delivery(CPUPPCState *env);
target_ulong cpu_ppc_load_decr(CPUPPCState *env);
void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value);
target_ulong cpu_ppc_load_hdecr(CPUPPCState *env);
void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value);
-uint64_t cpu_ppc_load_purr (CPUPPCState *env);
-uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
-uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
+uint64_t cpu_ppc_load_purr(CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env);
#if !defined(CONFIG_USER_ONLY)
-void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
-void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
-target_ulong load_40x_pit (CPUPPCState *env);
-void store_40x_pit (CPUPPCState *env, target_ulong val);
-void store_40x_dbcr0 (CPUPPCState *env, uint32_t val);
-void store_40x_sler (CPUPPCState *env, uint32_t val);
-void store_booke_tcr (CPUPPCState *env, target_ulong val);
-void store_booke_tsr (CPUPPCState *env, target_ulong val);
-void ppc_tlb_invalidate_all (CPUPPCState *env);
-void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
+void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value);
+void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value);
+target_ulong load_40x_pit(CPUPPCState *env);
+void store_40x_pit(CPUPPCState *env, target_ulong val);
+void store_40x_dbcr0(CPUPPCState *env, uint32_t val);
+void store_40x_sler(CPUPPCState *env, uint32_t val);
+void store_booke_tcr(CPUPPCState *env, target_ulong val);
+void store_booke_tsr(CPUPPCState *env, target_ulong val);
+void ppc_tlb_invalidate_all(CPUPPCState *env);
+void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
#endif
#endif
@@ -1349,7 +1365,8 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
gprv = env->gpr[gprn];
if (env->flags & POWERPC_FLAG_SPE) {
- /* If the CPU implements the SPE extension, we have to get the
+ /*
+ * If the CPU implements the SPE extension, we have to get the
* high bits of the GPR from the gprh storage area
*/
gprv &= 0xFFFFFFFFULL;
@@ -1360,8 +1377,8 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
}
/* Device control registers */
-int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
-int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
+int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
+int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX
@@ -1372,7 +1389,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
/* MMU modes definitions */
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
+static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
{
return ifetch ? env->immu_idx : env->dmmu_idx;
}
@@ -1990,17 +2007,17 @@ void ppc_compat_add_property(Object *obj, const char *name,
/* External Input Interrupt Directed to Guest State */
#define EPCR_EXTGS (1 << 31)
-#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
-#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
-#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
-#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
-#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
+#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
-#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
-#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
-#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
-#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
-#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
+#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
+#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
+#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
/* HID0 bits */
#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
@@ -2226,7 +2243,8 @@ enum {
};
/*****************************************************************************/
-/* Memory access type :
+/*
+ * Memory access type:
* may be needed for precise access rights control and precise exceptions.
*/
enum {
@@ -2242,8 +2260,9 @@ enum {
ACCESS_CACHE = 0x60, /* Cache manipulation */
};
-/* Hardware interruption sources:
- * all those exception can be raised simulteaneously
+/*
+ * Hardware interrupt sources:
+ * all those exceptions can be raised simultaneously
*/
/* Input pins definitions */
enum {
@@ -2325,9 +2344,11 @@ enum {
enum {
/* POWER7 input pins */
POWER7_INPUT_INT = 0,
- /* POWER7 probably has other inputs, but we don't care about them
+ /*
+ * POWER7 probably has other inputs, but we don't care about them
* for any existing machine. We can wire these up when we need
- * them */
+ * them
+ */
POWER7_INPUT_NB,
};
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
index 9164fe7..f102177 100644
--- a/target/ppc/dfp_helper.c
+++ b/target/ppc/dfp_helper.c
@@ -1104,19 +1104,19 @@ void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t s) \
} \
} \
\
- while (offset < (size)/4) { \
+ while (offset < (size) / 4) { \
n++; \
- digits[(size)/4-n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
- if (digits[(size)/4-n] > 10) { \
+ digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
+ if (digits[(size) / 4 - n] > 10) { \
dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
return; \
} else { \
- nonzero |= (digits[(size)/4-n] > 0); \
+ nonzero |= (digits[(size) / 4 - n] > 0); \
} \
} \
\
if (nonzero) { \
- decNumberSetBCD(&dfp.t, digits+((size)/4)-n, n); \
+ decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \
} \
\
if (s && sgn) { \
@@ -1170,13 +1170,13 @@ DFP_HELPER_XEX(dxexq, 128)
static void dfp_set_raw_exp_64(uint64_t *t, uint64_t raw)
{
*t &= 0x8003ffffffffffffULL;
- *t |= (raw << (63-13));
+ *t |= (raw << (63 - 13));
}
static void dfp_set_raw_exp_128(uint64_t *t, uint64_t raw)
{
t[HI_IDX] &= 0x80003fffffffffffULL;
- t[HI_IDX] |= (raw << (63-17));
+ t[HI_IDX] |= (raw << (63 - 17));
}
#define DFP_HELPER_IEX(op, size) \
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index beafcf1..ec2c177 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -25,9 +25,9 @@
#include "internal.h"
#include "helper_regs.h"
-//#define DEBUG_OP
-//#define DEBUG_SOFTWARE_TLB
-//#define DEBUG_EXCEPTIONS
+/* #define DEBUG_OP */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DEBUG_EXCEPTIONS */
#ifdef DEBUG_EXCEPTIONS
# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
@@ -126,8 +126,9 @@ static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
return offset;
}
-/* Note that this function should be greatly optimized
- * when called with a constant excp, from ppc_hw_interrupt
+/*
+ * Note that this function should be greatly optimized when called
+ * with a constant excp, from ppc_hw_interrupt
*/
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
@@ -147,7 +148,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
msr = env->msr & ~0x783f0000ULL;
}
- /* new interrupt handler msr preserves existing HV and ME unless
+ /*
+ * new interrupt handler msr preserves existing HV and ME unless
* explicitly overridden
*/
new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
@@ -166,7 +168,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
excp = powerpc_reset_wakeup(cs, env, excp, &msr);
}
- /* Exception targetting modifiers
+ /*
+ * Exception targeting modifiers
*
* LPES0 is supported on POWER7/8/9
* LPES1 is not supported (old iSeries mode)
@@ -194,7 +197,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
ail = 0;
}
- /* Hypervisor emulation assistance interrupt only exists on server
+ /*
+ * Hypervisor emulation assistance interrupt only exists on server
* arch 2.05 or later. We also don't want to generate it if
* we don't have HVB in msr_mask (PAPR mode).
*/
@@ -229,8 +233,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
if (msr_me == 0) {
- /* Machine check exception is not enabled.
- * Enter checkstop state.
+ /*
+ * Machine check exception is not enabled. Enter
+ * checkstop state.
*/
fprintf(stderr, "Machine check while not allowed. "
"Entering checkstop state\n");
@@ -242,8 +247,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
cpu_interrupt_exittb(cs);
}
if (env->msr_mask & MSR_HVB) {
- /* ISA specifies HV, but can be delivered to guest with HV clear
- * (e.g., see FWNMI in PAPR).
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR).
*/
new_msr |= (target_ulong)MSR_HVB;
}
@@ -294,9 +300,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
/* Get rS/rD and rA from faulting opcode */
- /* Note: the opcode fields will not be set properly for a direct
- * store load/store, but nobody cares as nobody actually uses
- * direct store segments.
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
*/
env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
break;
@@ -310,7 +317,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
return;
}
- /* FP exceptions always have NIP pointing to the faulting
+ /*
+ * FP exceptions always have NIP pointing to the faulting
* instruction, so always use store_next and claim we are
* precise in the MSR.
*/
@@ -341,7 +349,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
dump_syscall(env);
lev = env->error_code;
- /* We need to correct the NIP which in this case is supposed
+ /*
+ * We need to correct the NIP which in this case is supposed
* to point to the next instruction
*/
env->nip += 4;
@@ -425,8 +434,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
new_msr |= ((target_ulong)1 << MSR_ME);
}
if (env->msr_mask & MSR_HVB) {
- /* ISA specifies HV, but can be delivered to guest with HV clear
- * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
*/
new_msr |= (target_ulong)MSR_HVB;
} else {
@@ -675,7 +685,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
env->spr[asrr1] = env->spr[srr1];
}
- /* Sort out endianness of interrupt, this differs depending on the
+ /*
+ * Sort out endianness of interrupt, this differs depending on the
* CPU, the HV mode, etc...
*/
#ifdef TARGET_PPC64
@@ -716,8 +727,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
vector |= env->excp_prefix;
- /* AIL only works if there is no HV transition and we are running with
- * translations enabled
+ /*
+ * AIL only works if there is no HV transition and we are running
+ * with translations enabled
*/
if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
@@ -745,8 +757,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
}
#endif
- /* We don't use hreg_store_msr here as already have treated
- * any special case that could occur. Just store MSR and update hflags
+ /*
+ * We don't use hreg_store_msr here as we have already treated any
+ * special case that could occur. Just store MSR and update hflags
*
* Note: We *MUST* not use hreg_store_msr() as-is anyway because it
* will prevent setting of the HV bit which some exceptions might need
@@ -762,8 +775,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Reset the reservation */
env->reserve_addr = -1;
- /* Any interrupt is context synchronizing, check if TCG TLB
- * needs a delayed flush on ppc64
+ /*
+ * Any interrupt is context synchronizing, check if TCG TLB needs
+ * a delayed flush on ppc64
*/
check_tlb_flush(env, false);
}
@@ -1015,8 +1029,9 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
cs = CPU(ppc_env_get_cpu(env));
cs->halted = 1;
- /* The architecture specifies that HDEC interrupts are
- * discarded in PM states
+ /*
+ * The architecture specifies that HDEC interrupts are discarded
+ * in PM states
*/
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
@@ -1047,8 +1062,9 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
#if defined(DEBUG_OP)
cpu_dump_rfi(env->nip, env->msr);
#endif
- /* No need to raise an exception here,
- * as rfi is always the last insn of a TB
+ /*
+ * No need to raise an exception here, as rfi is always the last
+ * insn of a TB
*/
cpu_interrupt_exittb(cs);
/* Reset the reservation */
@@ -1067,8 +1083,9 @@ void helper_rfi(CPUPPCState *env)
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
- /* The architeture defines a number of rules for which bits
- * can change but in practice, we handle this in hreg_store_msr()
+ /*
+ * The architecture defines a number of rules for which bits can
+ * change but in practice, we handle this in hreg_store_msr()
* which will be called by do_rfi(), so there is no need to filter
* here
*/
@@ -1206,9 +1223,11 @@ static int book3s_dbell2irq(target_ulong rb)
{
int msg = rb & DBELL_TYPE_MASK;
- /* A Directed Hypervisor Doorbell message is sent only if the
+ /*
+ * A Directed Hypervisor Doorbell message is sent only if the
* message type is 5. All other types are reserved and the
- * instruction is a no-op */
+ * instruction is a no-op
+ */
return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1;
}
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 2ed4f42..0b7308f 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -90,10 +90,12 @@ uint32_t helper_tosingle(uint64_t arg)
ret = extract64(arg, 62, 2) << 30;
ret |= extract64(arg, 29, 30);
} else {
- /* Zero or Denormal result. If the exponent is in bounds for
- * a single-precision denormal result, extract the proper bits.
- * If the input is not zero, and the exponent is out of bounds,
- * then the result is undefined; this underflows to zero.
+ /*
+ * Zero or Denormal result. If the exponent is in bounds for
+ * a single-precision denormal result, extract the proper
+ * bits. If the input is not zero, and the exponent is out of
+ * bounds, then the result is undefined; this underflows to
+ * zero.
*/
ret = extract64(arg, 63, 1) << 31;
if (unlikely(exp >= 874)) {
@@ -1090,7 +1092,7 @@ uint32_t helper_ftsqrt(uint64_t frb)
fe_flag = 1;
} else if (unlikely(float64_is_neg(frb))) {
fe_flag = 1;
- } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
+ } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
fe_flag = 1;
}
@@ -1789,7 +1791,8 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
#define float64_to_float64(x, env) x
-/* VSX_ADD_SUB - VSX floating point add/subract
+/*
+ * VSX_ADD_SUB - VSX floating point add/subtract
* name - instruction mnemonic
* op - operation (add or sub)
* nels - number of elements (1, 2 or 4)
@@ -1872,7 +1875,8 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
-/* VSX_MUL - VSX floating point multiply
+/*
+ * VSX_MUL - VSX floating point multiply
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -1950,7 +1954,8 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
-/* VSX_DIV - VSX floating point divide
+/*
+ * VSX_DIV - VSX floating point divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2034,7 +2039,8 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
-/* VSX_RE - VSX floating point reciprocal estimate
+/*
+ * VSX_RE - VSX floating point reciprocal estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2075,7 +2081,8 @@ VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
-/* VSX_SQRT - VSX floating point square root
+/*
+ * VSX_SQRT - VSX floating point square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2124,7 +2131,8 @@ VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
-/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
+/*
+ * VSX_RSQRTE - VSX floating point reciprocal square root estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2174,7 +2182,8 @@ VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
-/* VSX_TDIV - VSX floating point test for divide
+/*
+ * VSX_TDIV - VSX floating point test for divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2207,18 +2216,20 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tp##_is_any_nan(xa.fld) || \
tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
- } else if ((e_b <= emin) || (e_b >= (emax-2))) { \
+ } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
fe_flag = 1; \
} else if (!tp##_is_zero(xa.fld) && \
(((e_a - e_b) >= emax) || \
- ((e_a - e_b) <= (emin+1)) || \
- (e_a <= (emin+nbits)))) { \
+ ((e_a - e_b) <= (emin + 1)) || \
+ (e_a <= (emin + nbits)))) { \
fe_flag = 1; \
} \
\
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
- /* XB is not zero because of the above check and */ \
- /* so must be denormalized. */ \
+ /* \
+ * XB is not zero because of the above check and so \
+ * must be denormalized. \
+ */ \
fg_flag = 1; \
} \
} \
@@ -2231,7 +2242,8 @@ VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
-/* VSX_TSQRT - VSX floating point test for square root
+/*
+ * VSX_TSQRT - VSX floating point test for square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2266,13 +2278,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} else if (unlikely(tp##_is_neg(xb.fld))) { \
fe_flag = 1; \
} else if (!tp##_is_zero(xb.fld) && \
- (e_b <= (emin+nbits))) { \
+ (e_b <= (emin + nbits))) { \
fe_flag = 1; \
} \
\
if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
- /* XB is not zero because of the above check and */ \
- /* therefore must be denormalized. */ \
+ /* \
+ * XB is not zero because of the above check and \
+ * therefore must be denormalized. \
+ */ \
fg_flag = 1; \
} \
} \
@@ -2285,7 +2299,8 @@ VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
-/* VSX_MADD - VSX floating point muliply/add variations
+/*
+ * VSX_MADD - VSX floating point multiply/add variations
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2322,8 +2337,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
- /* Avoid double rounding errors by rounding the intermediate */ \
- /* result to odd. */ \
+ /* \
+ * Avoid double rounding errors by rounding the intermediate \
+ * result to odd. \
+ */ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
maddflgs, &tstat); \
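The round-to-odd trick in the hunk above is worth spelling out: for the single-precision madd helpers (the r2sp case), doing the 64-bit muladd in round-to-nearest and then rounding again to 32 bits can come out one ulp off (double rounding). Rounding the intermediate result toward zero and then forcing its low significand bit whenever the operation was inexact ("round to odd") keeps enough information that the final conversion rounds correctly. A standalone sketch of the idea, assuming a correctly rounded host fma; this is an illustration, not QEMU's softfloat code:

#include <fenv.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

static float fmaf_round_to_odd(float a, float b, float c)
{
    double d;
    uint64_t bits;

    fesetround(FE_TOWARDZERO);
    feclearexcept(FE_INEXACT);
    d = fma((double)a, (double)b, (double)c); /* one rounding, toward zero */
    if (fetestexcept(FE_INEXACT)) {
        memcpy(&bits, &d, sizeof(bits));
        bits |= 1;                    /* sticky LSB: round to odd */
        memcpy(&d, &bits, sizeof(d));
    }
    fesetround(FE_TONEAREST);
    return (float)d;                  /* final conversion now rounds once */
}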
@@ -2388,7 +2405,8 @@ VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
-/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+/*
+ * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
* op - instruction mnemonic
* cmp - comparison operation
* exp - expected result of comparison
@@ -2604,7 +2622,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
-/* VSX_MAX_MIN - VSX floating point maximum/minimum
+/*
+ * VSX_MAX_MIN - VSX floating point maximum/minimum
* name - instruction mnemonic
* op - operation (max or min)
* nels - number of elements (1, 2 or 4)
@@ -2733,7 +2752,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
-/* VSX_CMP - VSX floating point compare
+/*
+ * VSX_CMP - VSX floating point compare
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -2778,7 +2798,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
putVSR(xT(opcode), &xt, env); \
- if ((opcode >> (31-21)) & 1) { \
+ if ((opcode >> (31 - 21)) & 1) { \
env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
} \
do_float_check_status(env, GETPC()); \
@@ -2793,7 +2813,8 @@ VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
-/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
+/*
+ * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@@ -2829,10 +2850,11 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
-VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
-VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
-/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
+/*
+ * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@@ -2868,7 +2890,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
-/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
+/*
+ * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
* involving one half precision value
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
@@ -2953,7 +2976,8 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
return float32_to_float64(xb >> 32, &tstat);
}
-/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
+/*
+ * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (float32 or float64)
@@ -2996,17 +3020,18 @@ VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
-VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
-VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
-/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
+/*
+ * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
* op - instruction mnemonic
* stp - source type (float32 or float64)
* ttp - target type (int32, uint32, int64 or uint64)
@@ -3040,7 +3065,8 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
-/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
+/*
+ * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* stp - source type (int32, uint32, int64 or uint64)
@@ -3079,14 +3105,15 @@ VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
-/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
+/*
+ * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
* op - instruction mnemonic
* stp - source type (int32, uint32, int64 or uint64)
* ttp - target type (float32 or float64)
@@ -3111,13 +3138,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
-/* For "use current rounding mode", define a value that will not be one of
- * the existing rounding model enums.
+/*
+ * For "use current rounding mode", define a value that will not be
+ * one of the existing rounding mode enums.
*/
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
float_round_up + float_round_to_zero)
-/* VSX_ROUND - VSX floating point round
+/*
+ * VSX_ROUND - VSX floating point round
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
@@ -3150,9 +3179,11 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
} \
\
- /* If this is not a "use current rounding mode" instruction, \
+ /* \
+ * If this is not a "use current rounding mode" instruction, \
* then inhibit setting of the XX bit and restore rounding \
- * mode from FPSCR */ \
+ * mode from FPSCR \
+ */ \
if (rmode != FLOAT_ROUND_CURRENT) { \
fpscr_set_rounding_mode(env); \
env->fp_status.float_exception_flags &= ~float_flag_inexact; \
@@ -3234,7 +3265,8 @@ void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
putVSR(xT(opcode), &xt, env);
}
-/* VSX_TEST_DC - VSX floating point test data class
+/*
+ * VSX_TEST_DC - VSX floating point test data class
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* xbn - VSR register number
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
index fbf3821..ce3625f 100644
--- a/target/ppc/gdbstub.c
+++ b/target/ppc/gdbstub.c
@@ -33,14 +33,14 @@ static int ppc_gdb_register_len_apple(int n)
return 8;
case 64 ... 95:
return 16;
- case 64+32: /* nip */
- case 65+32: /* msr */
- case 67+32: /* lr */
- case 68+32: /* ctr */
- case 70+32: /* fpscr */
+ case 64 + 32: /* nip */
+ case 65 + 32: /* msr */
+ case 67 + 32: /* lr */
+ case 68 + 32: /* ctr */
+ case 70 + 32: /* fpscr */
return 8;
- case 66+32: /* cr */
- case 69+32: /* xer */
+ case 66 + 32: /* cr */
+ case 69 + 32: /* xer */
return 4;
default:
return 0;
@@ -84,11 +84,14 @@ static int ppc_gdb_register_len(int n)
}
}
-/* We need to present the registers to gdb in the "current" memory ordering.
- For user-only mode we get this for free; TARGET_WORDS_BIGENDIAN is set to
- the proper ordering for the binary, and cannot be changed.
- For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
- the current mode of the chip to see if we're running in little-endian. */
+/*
+ * We need to present the registers to gdb in the "current" memory
+ * ordering. For user-only mode we get this for free;
+ * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the
+ * binary, and cannot be changed. For system mode,
+ * TARGET_WORDS_BIGENDIAN is always set, and we must check the current
+ * mode of the chip to see if we're running in little-endian.
+ */
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
@@ -104,11 +107,12 @@ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
#endif
}
-/* Old gdb always expects FP registers. Newer (xml-aware) gdb only
+/*
+ * Old gdb always expects FP registers. Newer (xml-aware) gdb only
* expects whatever the target description contains. Due to a
* historical mishap the FP registers appear in between core integer
- * regs and PC, MSR, CR, and so forth. We hack round this by giving the
- * FP regs zero size when talking to a newer gdb.
+ * regs and PC, MSR, CR, and so forth. We hack round this by giving
+ * the FP regs zero size when talking to a newer gdb.
*/
int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
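The endianness comment block above reduces to: gdb expects register images in the guest's current byte order, and under system emulation a PPC guest can switch endianness at run time while TARGET_WORDS_BIGENDIAN stays fixed. A minimal sketch of the swap being described (the real ppc_maybe_bswap_register consults the CPU state; the helper below is an invented simplification):

#include <stdbool.h>
#include <stdint.h>

static void maybe_bswap_register(uint8_t *buf, int len, bool guest_is_le)
{
    /* Reverse the register image in place when the guest runs
     * little-endian but the image was produced big-endian. */
    if (!guest_is_le) {
        return;
    }
    for (int i = 0; i < len / 2; i++) {
        uint8_t tmp = buf[i];

        buf[i] = buf[len - 1 - i];
        buf[len - 1 - i] = tmp;
    }
}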
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index c863abc..922da76 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -44,10 +44,11 @@ static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
- /* This is our encoding for server processors. The architecture
+ /*
+ * This is our encoding for server processors. The architecture
* specifies that there is no such thing as userspace with
- * translation off, however it appears that MacOS does it and
- * some 32-bit CPUs support it. Weird...
+ * translation off, however it appears that MacOS does it and some
+ * 32-bit CPUs support it. Weird...
*
* 0 = Guest User space virtual mode
* 1 = Guest Kernel space virtual mode
@@ -143,7 +144,8 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
/* Change the exception prefix on PowerPC 601 */
env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
}
- /* If PR=1 then EE, IR and DR must be 1
+ /*
+ * If PR=1 then EE, IR and DR must be 1
*
* Note: We only enforce this on 64-bit server processors.
* It appears that:
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 162add5..f6a088a 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -137,7 +137,8 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
/* if x = 0xab, returns 0xabababababababab */
#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
-/* substract 1 from each byte, and with inverse, check if MSB is set at each
+/*
+ * subtract 1 from each byte, AND with the inverse, check if MSB is set at each
* byte.
* i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
* (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
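The comment above is the classic SWAR zero-byte test that helper_cmpeqb builds on. As a self-contained program, assuming the usual haszero/hasvalue definitions (they match the #undefs in the next hunk; main() is added here purely for demonstration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define pattern(x)     (((x) & 0xff) * (~(uint64_t)0 / 0xff))
#define haszero(v)     (((v) - pattern(0x1)) & ~(v) & pattern(0x80))
#define hasvalue(x, n) (haszero((x) ^ pattern(n)))

int main(void)
{
    uint64_t v = 0x11223300445566abULL;

    /* High bit set at every byte position that held a zero byte */
    printf("haszero:  %016" PRIx64 "\n", haszero(v));
    /* XOR-ing with pattern(n) turns "find byte n" into "find zero" */
    printf("hasvalue: %016" PRIx64 "\n", hasvalue(v, 0xab));
    return 0;
}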
@@ -156,7 +157,8 @@ uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
#undef haszero
#undef hasvalue
-/* Return invalid random number.
+/*
+ * Return invalid random number.
*
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
* random number
@@ -181,7 +183,7 @@ uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
uint64_t ra = 0;
for (i = 0; i < 8; i++) {
- int index = (rs >> (i*8)) & 0xFF;
+ int index = (rs >> (i * 8)) & 0xFF;
if (index < 64) {
if (rb & PPC_BIT(index)) {
ra |= 1 << i;
@@ -370,7 +372,8 @@ target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
-/* Extremely decomposed:
+/*
+ * Extremely decomposed:
+ * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
*/
@@ -393,7 +396,7 @@ target_ulong helper_602_mfrom(target_ulong arg)
for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element) \
- for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
+ for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
#endif
/* Saturating arithmetic helpers. */
@@ -634,7 +637,8 @@ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
} \
}
-/* VABSDU - Vector absolute difference unsigned
+/*
+ * VABSDU - Vector absolute difference unsigned
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
@@ -739,7 +743,8 @@ void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
} \
}
-/* VCMPNEZ - Vector compare not equal to zero
+/*
+ * VCMPNEZ - Vector compare not equal to zero
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
@@ -1138,7 +1143,7 @@ void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
-#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
#define EXTRACT_BIT(avr, i, index) \
@@ -1169,7 +1174,7 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
int index = VBPERMQ_INDEX(b, i);
if (index < 128) {
- uint64_t mask = (1ull << (63-(index & 0x3F)));
+ uint64_t mask = (1ull << (63 - (index & 0x3F)));
if (a->u64[VBPERMQ_DW(index)] & mask) {
perm |= (0x8000 >> i);
}
@@ -1449,9 +1454,9 @@ void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u8) {
#if defined(HOST_WORDS_BIGENDIAN)
- t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
+ t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
#else
- t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (7-(i & 7));
+ t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7));
#endif
}
@@ -1463,19 +1468,19 @@ void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i, j; \
- trgtyp prod[sizeof(ppc_avr_t)/sizeof(a->srcfld[0])]; \
+ trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
\
VECTOR_FOR_INORDER_I(i, srcfld) { \
prod[i] = 0; \
for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
- if (a->srcfld[i] & (1ull<<j)) { \
+ if (a->srcfld[i] & (1ull << j)) { \
prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
} \
} \
} \
\
VECTOR_FOR_INORDER_I(i, trgfld) { \
- r->trgfld[i] = prod[2*i] ^ prod[2*i+1]; \
+ r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
} \
}
@@ -1493,7 +1498,7 @@ void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u64) {
prod[i] = 0;
for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull<<j)) {
+ if (a->u64[i] & (1ull << j)) {
prod[i] ^= (((__uint128_t)b->u64[i]) << j);
}
}
@@ -1508,7 +1513,7 @@ void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(i, u64) {
prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull<<j)) {
+ if (a->u64[i] & (1ull << j)) {
ppc_avr_t bshift;
if (j == 0) {
bshift.VsrD(0) = 0;
@@ -1548,9 +1553,9 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
VECTOR_FOR_INORDER_I(j, u32) {
uint32_t e = x[i]->u32[j];
- result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
- ((e >> 6) & 0x3e0) |
- ((e >> 3) & 0x1f));
+ result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
+ ((e >> 6) & 0x3e0) |
+ ((e >> 3) & 0x1f));
}
}
*r = result;
@@ -1568,7 +1573,7 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
\
VECTOR_FOR_INORDER_I(i, from) { \
result.to[i] = cvt(a0->from[i], &sat); \
- result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
+ result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
} \
*r = result; \
if (dosat && sat) { \
@@ -1736,9 +1741,11 @@ VEXTU_X_DO(vextuhrx, 16, 0)
VEXTU_X_DO(vextuwrx, 32, 0)
#undef VEXTU_X_DO
-/* The specification says that the results are undefined if all of the
- * shift counts are not identical. We check to make sure that they are
- * to conform to what real hardware appears to do. */
+/*
+ * The specification says that the results are undefined if all of the
+ * shift counts are not identical. We check that they are identical,
+ * to conform to what real hardware appears to do.
+ */
#define VSHIFT(suffix, leftp) \
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
@@ -1805,9 +1812,10 @@ void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
int i;
unsigned int shift, bytes;
- /* Use reverse order, as destination and source register can be same. Its
- * being modified in place saving temporary, reverse order will guarantee
- * that computed result is not fed back.
+ /*
+ * Use reverse order, as destination and source registers can be
+ * the same. It is modified in place, saving a temporary; reverse
+ * order guarantees that the computed result is not fed back.
*/
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
shift = b->u8[i] & 0x7; /* extract shift value */
@@ -1840,7 +1848,7 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#if defined(HOST_WORDS_BIGENDIAN)
memmove(&r->u8[0], &a->u8[sh], 16 - sh);
- memset(&r->u8[16-sh], 0, sh);
+ memset(&r->u8[16 - sh], 0, sh);
#else
memmove(&r->u8[sh], &a->u8[0], 16 - sh);
memset(&r->u8[0], 0, sh);
@@ -2112,7 +2120,7 @@ void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
ppc_avr_t result; \
\
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
- uint16_t e = b->u16[hi ? i : i+4]; \
+ uint16_t e = b->u16[hi ? i : i + 4]; \
uint8_t a = (e >> 15) ? 0xff : 0; \
uint8_t r = (e >> 10) & 0x1f; \
uint8_t g = (e >> 5) & 0x1f; \
@@ -2463,7 +2471,7 @@ static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
{
if (n & 1) {
bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F;
- bcd->u8[BCD_DIG_BYTE(n)] |= (digit<<4);
+ bcd->u8[BCD_DIG_BYTE(n)] |= (digit << 4);
} else {
bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0;
bcd->u8[BCD_DIG_BYTE(n)] |= digit;
@@ -3220,7 +3228,7 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
if (st == 0) {
- if ((six & (0x8 >> (2*i))) == 0) {
+ if ((six & (0x8 >> (2 * i))) == 0) {
r->VsrD(i) = ror64(a->VsrD(i), 1) ^
ror64(a->VsrD(i), 8) ^
(a->VsrD(i) >> 7);
@@ -3230,7 +3238,7 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
(a->VsrD(i) >> 6);
}
} else { /* st == 1 */
- if ((six & (0x8 >> (2*i))) == 0) {
+ if ((six & (0x8 >> (2 * i))) == 0) {
r->VsrD(i) = ror64(a->VsrD(i), 28) ^
ror64(a->VsrD(i), 34) ^
ror64(a->VsrD(i), 39);
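Several of the int_helper.c hunks above (the VPMSUM macro and helper_vpmsumd) implement carry-less multiplication: for every set bit j in one operand, XOR in the other operand shifted left by j, with no carries between bit columns. The same inner loop in a standalone form, illustrative only; QEMU's helper works on full 128-bit lanes via __uint128_t or a pair of VsrD halves:

#include <stdint.h>

/* Carry-less (GF(2) polynomial) multiply of two 32-bit values. */
static uint64_t clmul32(uint32_t a, uint32_t b)
{
    uint64_t prod = 0;

    for (int j = 0; j < 32; j++) {
        if (a & (UINT32_C(1) << j)) {
            prod ^= (uint64_t)b << j;  /* XOR instead of ADD: no carries */
        }
    }
    return prod;
}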
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 59d92c4..02e22e2 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -49,24 +49,14 @@
#include "elf.h"
#include "sysemu/kvm_int.h"
-//#define DEBUG_KVM
-
-#ifdef DEBUG_KVM
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
-static int cap_interrupt_unset = false;
-static int cap_interrupt_level = false;
+static int cap_interrupt_unset;
+static int cap_interrupt_level;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
@@ -96,7 +86,8 @@ static int cap_large_decr;
static uint32_t debug_inst_opcode;
-/* XXX We have a race condition where we actually have a level triggered
+/*
+ * XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
* takes but ignores it, goes to sleep and never gets notified that there's
* still an interrupt pending.
@@ -114,10 +105,12 @@ static void kvm_kick_cpu(void *opaque)
qemu_cpu_kick(CPU(cpu));
}
-/* Check whether we are running with KVM-PR (instead of KVM-HV). This
+/*
+ * Check whether we are running with KVM-PR (instead of KVM-HV). This
* should only be used for fallback tests - generally we should use
* explicit capabilities for the features we want, rather than
- * assuming what is/isn't available depending on the KVM variant. */
+ * assuming what is/isn't available depending on the KVM variant.
+ */
static bool kvmppc_is_pr(KVMState *ks)
{
/* Assume KVM-PR if the GET_PVINFO capability is available */
@@ -143,8 +136,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
- /* Note: we don't set cap_papr here, because this capability is
- * only activated after this by kvmppc_set_papr() */
+ /*
+ * Note: we don't set cap_papr here, because this capability is
+ * only activated after this by kvmppc_set_papr()
+ */
cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
@@ -160,7 +155,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
* in KVM at this moment.
*
* TODO: call kvm_vm_check_extension() with the right capability
- * after the kernel starts implementing it.*/
+ * after the kernel starts implementing it.
+ */
cap_ppc_pvr_compat = false;
if (!cap_interrupt_level) {
@@ -186,10 +182,13 @@ static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
int ret;
if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
- /* What we're really trying to say is "if we're on BookE, we use
- the native PVR for now". This is the only sane way to check
- it though, so we potentially confuse users that they can run
- BookE guests on BookS. Let's hope nobody dares enough :) */
+ /*
+ * What we're really trying to say is "if we're on BookE, we
+ * use the native PVR for now". This is the only sane way to
+ * check it though, so we potentially mislead users into thinking
+ * they can run BookE guests on BookS. Let's hope nobody dares
+ * enough :)
+ */
return 0;
} else {
if (!cap_segstate) {
@@ -421,12 +420,14 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
}
if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
- /* Mostly what guest pagesizes we can use are related to the
+ /*
+ * Mostly what guest pagesizes we can use are related to the
* host pages used to map guest RAM, which is handled in the
* platform code. Cache-Inhibited largepages (64k) however are
* used for I/O, so if they're mapped to the host at all it
* will be a normal mapping, not a special hugepage one used
- * for RAM. */
+ * for RAM.
+ */
if (getpagesize() < 0x10000) {
error_setg(errp,
"KVM can't supply 64kiB CI pages, which guest expects");
@@ -440,9 +441,9 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return POWERPC_CPU(cpu)->vcpu_id;
}
-/* e500 supports 2 h/w breakpoint and 2 watchpoint.
- * book3s supports only 1 watchpoint, so array size
- * of 4 is sufficient for now.
+/*
+ * e500 supports 2 h/w breakpoints and 2 watchpoints. book3s
+ * supports only 1 watchpoint, so an array size of 4 is sufficient
+ * for now.
*/
#define MAX_HW_BKPTS 4
@@ -497,9 +498,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
break;
case POWERPC_MMU_2_07:
if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
- /* KVM-HV has transactional memory on POWER8 also without the
- * KVM_CAP_PPC_HTM extension, so enable it here instead as
- * long as it's availble to userspace on the host. */
+ /*
+ * KVM-HV has transactional memory on POWER8 also without
+ * the KVM_CAP_PPC_HTM extension, so enable it here
+ * instead as long as it's available to userspace on the
+ * host.
+ */
if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
cap_htm = true;
}
@@ -626,7 +630,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)&fpscr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
+ trace_kvm_failed_fpscr_set(strerror(errno));
return ret;
}
@@ -647,8 +651,8 @@ static int kvm_put_fp(CPUState *cs)
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
- i, strerror(errno));
+ trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
+ strerror(errno));
return ret;
}
}
@@ -659,7 +663,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)&env->vscr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
+ trace_kvm_failed_vscr_set(strerror(errno));
return ret;
}
@@ -668,7 +672,7 @@ static int kvm_put_fp(CPUState *cs)
reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
+ trace_kvm_failed_vr_set(i, strerror(errno));
return ret;
}
}
@@ -693,7 +697,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)&fpscr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
+ trace_kvm_failed_fpscr_get(strerror(errno));
return ret;
} else {
env->fpscr = fpscr;
@@ -709,8 +713,8 @@ static int kvm_get_fp(CPUState *cs)
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get %s%d from KVM: %s\n",
- vsx ? "VSR" : "FPR", i, strerror(errno));
+ trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
+ strerror(errno));
return ret;
} else {
#ifdef HOST_WORDS_BIGENDIAN
@@ -733,7 +737,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)&env->vscr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
+ trace_kvm_failed_vscr_get(strerror(errno));
return ret;
}
@@ -742,8 +746,7 @@ static int kvm_get_fp(CPUState *cs)
reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get VR%d from KVM: %s\n",
- i, strerror(errno));
+ trace_kvm_failed_vr_get(i, strerror(errno));
return ret;
}
}
@@ -764,7 +767,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
+ trace_kvm_failed_vpa_addr_get(strerror(errno));
return ret;
}
@@ -774,8 +777,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
- strerror(errno));
+ trace_kvm_failed_slb_get(strerror(errno));
return ret;
}
@@ -785,8 +787,7 @@ static int kvm_get_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
- strerror(errno));
+ trace_kvm_failed_dtl_get(strerror(errno));
return ret;
}
@@ -800,10 +801,12 @@ static int kvm_put_vpa(CPUState *cs)
struct kvm_one_reg reg;
int ret;
- /* SLB shadow or DTL can't be registered unless a master VPA is
+ /*
+ * SLB shadow or DTL can't be registered unless a master VPA is
* registered. That means when restoring state, if a VPA *is*
* registered, we need to set that up first. If not, we need to
- * deregister the others before deregistering the master VPA */
+ * deregister the others before deregistering the master VPA
+ */
assert(spapr_cpu->vpa_addr
|| !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
@@ -812,7 +815,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ trace_kvm_failed_vpa_addr_set(strerror(errno));
return ret;
}
}
@@ -823,7 +826,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
+ trace_kvm_failed_slb_set(strerror(errno));
return ret;
}
@@ -833,8 +836,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
- strerror(errno));
+ trace_kvm_failed_dtl_set(strerror(errno));
return ret;
}
@@ -843,7 +845,7 @@ static int kvm_put_vpa(CPUState *cs)
reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret < 0) {
- DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ trace_kvm_failed_null_vpa_addr_set(strerror(errno));
return ret;
}
}
@@ -929,8 +931,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
regs.pid = env->spr[SPR_BOOKE_PID];
- for (i = 0;i < 32; i++)
+ for (i = 0; i < 32; i++) {
regs.gpr[i] = env->gpr[i];
+ }
regs.cr = 0;
for (i = 0; i < 8; i++) {
@@ -938,8 +941,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
- if (ret < 0)
+ if (ret < 0) {
return ret;
+ }
kvm_put_fp(cs);
@@ -962,10 +966,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
if (cap_one_reg) {
int i;
- /* We deliberately ignore errors here, for kernels which have
+ /*
+ * We deliberately ignore errors here, for kernels which have
* the ONE_REG calls, but don't support the specific
* registers, there's a reasonable chance things will still
- * work, at least until we try to migrate. */
+ * work, at least until we try to migrate.
+ */
for (i = 0; i < 1024; i++) {
uint64_t id = env->spr_cb[i].one_reg_id;
@@ -996,7 +1002,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
if (cap_papr) {
if (kvm_put_vpa(cs) < 0) {
- DPRINTF("Warning: Unable to set VPA information to KVM\n");
+ trace_kvm_failed_put_vpa();
}
}
@@ -1207,8 +1213,9 @@ int kvm_arch_get_registers(CPUState *cs)
int i, ret;
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
- if (ret < 0)
+ if (ret < 0) {
return ret;
+ }
cr = regs.cr;
for (i = 7; i >= 0; i--) {
@@ -1236,8 +1243,9 @@ int kvm_arch_get_registers(CPUState *cs)
env->spr[SPR_BOOKE_PID] = regs.pid;
- for (i = 0;i < 32; i++)
+ for (i = 0; i < 32; i++) {
env->gpr[i] = regs.gpr[i];
+ }
kvm_get_fp(cs);
@@ -1262,10 +1270,12 @@ int kvm_arch_get_registers(CPUState *cs)
if (cap_one_reg) {
int i;
- /* We deliberately ignore errors here, for kernels which have
+ /*
+ * We deliberately ignore errors here, for kernels which have
* the ONE_REG calls, but don't support the specific
* registers, there's a reasonable chance things will still
- * work, at least until we try to migrate. */
+ * work, at least until we try to migrate.
+ */
for (i = 0; i < 1024; i++) {
uint64_t id = env->spr_cb[i].one_reg_id;
@@ -1296,7 +1306,7 @@ int kvm_arch_get_registers(CPUState *cs)
if (cap_papr) {
if (kvm_get_vpa(cs) < 0) {
- DPRINTF("Warning: Unable to get VPA information from KVM\n");
+ trace_kvm_failed_get_vpa();
}
}
@@ -1339,20 +1349,24 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
qemu_mutex_lock_iothread();
- /* PowerPC QEMU tracks the various core input pins (interrupt, critical
- * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
+ /*
+ * PowerPC QEMU tracks the various core input pins (interrupt,
+ * critical interrupt, reset, etc) in PPC-specific
+ * env->irq_input_state.
+ */
if (!cap_interrupt_level &&
run->ready_for_interrupt_injection &&
(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->irq_input_state & (1<<PPC_INPUT_INT)))
+ (env->irq_input_state & (1 << PPC_INPUT_INT)))
{
- /* For now KVM disregards the 'irq' argument. However, in the
- * future KVM could cache it in-kernel to avoid a heavyweight exit
- * when reading the UIC.
+ /*
+ * For now KVM disregards the 'irq' argument. However, in the
+ * future KVM could cache it in-kernel to avoid a heavyweight
+ * exit when reading the UIC.
*/
irq = KVM_INTERRUPT_SET;
- DPRINTF("injected interrupt %d\n", irq);
+ trace_kvm_injected_interrupt(irq);
r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
if (r < 0) {
printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
@@ -1363,9 +1377,12 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
(NANOSECONDS_PER_SECOND / 50));
}
- /* We don't know if there are more interrupts pending after this. However,
- * the guest will return to userspace in the course of handling this one
- * anyways, so we will get a chance to deliver the rest. */
+ /*
+ * We don't know if there are more interrupts pending after
+ * this. However, the guest will return to userspace in the course
+ * of handling this one anyways, so we will get a chance to
+ * deliver the rest.
+ */
qemu_mutex_unlock_iothread();
}
@@ -1394,18 +1411,22 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
}
/* map dcr access to existing qemu dcr emulation */
-static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
+static int kvmppc_handle_dcr_read(CPUPPCState *env,
+ uint32_t dcrn, uint32_t *data)
{
- if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
+ if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
+ }
return 0;
}
-static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
+static int kvmppc_handle_dcr_write(CPUPPCState *env,
+ uint32_t dcrn, uint32_t data)
{
- if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
+ if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
+ }
return 0;
}
@@ -1697,20 +1718,20 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
- DPRINTF("handle dcr write\n");
+ trace_kvm_handle_dcr_write();
ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
} else {
- DPRINTF("handle dcr read\n");
+ trace_kvm_handle_dcr_read();
ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
}
break;
case KVM_EXIT_HLT:
- DPRINTF("handle halt\n");
+ trace_kvm_handle_halt();
ret = kvmppc_handle_halt(cpu);
break;
#if defined(TARGET_PPC64)
case KVM_EXIT_PAPR_HCALL:
- DPRINTF("handle PAPR hypercall\n");
+ trace_kvm_handle_papr_hcall();
run->papr_hcall.ret = spapr_hypercall(cpu,
run->papr_hcall.nr,
run->papr_hcall.args);
@@ -1718,18 +1739,18 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
#endif
case KVM_EXIT_EPR:
- DPRINTF("handle epr\n");
+ trace_kvm_handle_epr();
run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
ret = 0;
break;
case KVM_EXIT_WATCHDOG:
- DPRINTF("handle watchdog expiry\n");
+ trace_kvm_handle_watchdog_expiry();
watchdog_perform_action();
ret = 0;
break;
case KVM_EXIT_DEBUG:
- DPRINTF("handle debug exception\n");
+ trace_kvm_handle_debug_exception();
if (kvm_handle_debug(cpu, run)) {
ret = EXCP_DEBUG;
break;
@@ -1832,7 +1853,7 @@ static int read_cpuinfo(const char *field, char *value, int len)
ret = 0;
break;
}
- } while(*line);
+ } while (*line);
fclose(f);
@@ -1849,7 +1870,8 @@ uint32_t kvmppc_get_tbfreq(void)
return retval;
}
- if (!(ns = strchr(line, ':'))) {
+ ns = strchr(line, ':');
+ if (!ns) {
return retval;
}
@@ -1875,7 +1897,8 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len)
struct dirent *dirp;
DIR *dp;
- if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
+ dp = opendir(PROC_DEVTREE_CPU);
+ if (!dp) {
printf("Can't open directory " PROC_DEVTREE_CPU "\n");
return -1;
}
@@ -1929,10 +1952,11 @@ static uint64_t kvmppc_read_int_dt(const char *filename)
return 0;
}
-/* Read a CPU node property from the host device tree that's a single
+/*
+ * Read a CPU node property from the host device tree that's a single
* integer (32-bit or 64-bit). Returns 0 if anything goes wrong
- * (can't find or open the property, or doesn't understand the
- * format) */
+ * (can't find or open the property, or doesn't understand the format)
+ */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
char buf[PATH_MAX], *tmp;
@@ -1991,7 +2015,7 @@ int kvmppc_get_hasidle(CPUPPCState *env)
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
- uint32_t *hc = (uint32_t*)buf;
+ uint32_t *hc = (uint32_t *)buf;
struct kvm_ppc_pvinfo pvinfo;
if (!kvmppc_get_pvinfo(env, &pvinfo)) {
@@ -2064,8 +2088,10 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
exit(1);
}
- /* Update the capability flag so we sync the right information
- * with kvm */
+ /*
+ * Update the capability flag so we sync the right information
+ * with kvm
+ */
cap_papr = 1;
}
@@ -2133,8 +2159,10 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
long rampagesize, best_page_shift;
int i;
- /* Find the largest hardware supported page size that's less than
- * or equal to the (logical) backing page size of guest RAM */
+ /*
+ * Find the largest hardware supported page size that's less than
+ * or equal to the (logical) backing page size of guest RAM
+ */
kvm_get_smmu_info(&info, &error_fatal);
rampagesize = qemu_minrampagesize();
best_page_shift = 0;
@@ -2184,7 +2212,8 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
int fd;
void *table;
- /* Must set fd to -1 so we don't try to munmap when called for
+ /*
+ * Must set fd to -1 so we don't try to munmap when called for
* destroying the table, which the upper layers -will- do
*/
*pfd = -1;
@@ -2229,7 +2258,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
len = nb_table * sizeof(uint64_t);
/* FIXME: round this up to page size */
- table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (table == MAP_FAILED) {
fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
liobn);
@@ -2272,10 +2301,12 @@ int kvmppc_reset_htab(int shift_hint)
int ret;
ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
if (ret == -ENOTTY) {
- /* At least some versions of PR KVM advertise the
+ /*
+ * At least some versions of PR KVM advertise the
* capability, but don't implement the ioctl(). Oops.
* Return 0 so that we allocate the htab in qemu, as is
- * correct for PR. */
+ * correct for PR.
+ */
return 0;
} else if (ret < 0) {
return ret;
@@ -2283,9 +2314,12 @@ int kvmppc_reset_htab(int shift_hint)
return shift;
}
- /* We have a kernel that predates the htab reset calls. For PR
+ /*
+ * We have a kernel that predates the htab reset calls. For PR
* KVM, we need to allocate the htab ourselves, for an HV KVM of
- * this era, it has allocated a 16MB fixed size hash table already. */
+ * this era, it has allocated a 16MB fixed size hash table
+ * already.
+ */
if (kvmppc_is_pr(kvm_state)) {
/* PR - tell caller to allocate htab */
return 0;
@@ -2667,8 +2701,8 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
}
}
} while ((rc != 0)
- && ((max_ns < 0)
- || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
+ && ((max_ns < 0) ||
+ ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
return (rc == 0) ? 1 : 0;
}
@@ -2677,7 +2711,7 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid)
{
struct kvm_get_htab_header *buf;
- size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
+ size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
ssize_t rc;
buf = alloca(chunksize);
@@ -2685,7 +2719,7 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
buf->n_valid = n_valid;
buf->n_invalid = n_invalid;
- qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
+ qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
rc = write(fd, buf, chunksize);
if (rc < 0) {
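
The bulk of the kvm.c churn above is the DPRINTF-to-tracepoint conversion. A condensed before/after sketch of the pattern; kvm_failed_fpscr_set is one of the events actually added to trace-events at the end of this series, and -trace is QEMU's run-time trace enable option:

/* Before: output only existed when DEBUG_KVM was defined at build
 * time, via the macro deleted above. */
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));

/* After: always compiled in, switchable at run time, e.g. with
 * "-trace kvm_failed_fpscr_set"; trace_kvm_failed_fpscr_set() is
 * generated by tracetool from the trace-events declaration. */
trace_kvm_failed_fpscr_set(strerror(errno));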
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 2c2ea30..2238513 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -117,7 +117,8 @@ static inline int kvmppc_get_hasidle(CPUPPCState *env)
return 0;
}
-static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+static inline int kvmppc_get_hypercall(CPUPPCState *env,
+ uint8_t *buf, int buf_len)
{
return -1;
}
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index a92d0ad..25cdb90 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -24,22 +24,26 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
#endif
target_ulong xer;
- for (i = 0; i < 32; i++)
+ for (i = 0; i < 32; i++) {
qemu_get_betls(f, &env->gpr[i]);
+ }
#if !defined(TARGET_PPC64)
- for (i = 0; i < 32; i++)
+ for (i = 0; i < 32; i++) {
qemu_get_betls(f, &env->gprh[i]);
+ }
#endif
qemu_get_betls(f, &env->lr);
qemu_get_betls(f, &env->ctr);
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++) {
qemu_get_be32s(f, &env->crf[i]);
+ }
qemu_get_betls(f, &xer);
cpu_write_xer(env, xer);
qemu_get_betls(f, &env->reserve_addr);
qemu_get_betls(f, &env->msr);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
qemu_get_betls(f, &env->tgpr[i]);
+ }
for (i = 0; i < 32; i++) {
union {
float64 d;
@@ -56,14 +60,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &slb_nr);
#endif
qemu_get_betls(f, &sdr1);
- for (i = 0; i < 32; i++)
+ for (i = 0; i < 32; i++) {
qemu_get_betls(f, &env->sr[i]);
- for (i = 0; i < 2; i++)
- for (j = 0; j < 8; j++)
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
qemu_get_betls(f, &env->DBAT[i][j]);
- for (i = 0; i < 2; i++)
- for (j = 0; j < 8; j++)
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
qemu_get_betls(f, &env->IBAT[i][j]);
+ }
+ }
qemu_get_sbe32s(f, &env->nb_tlb);
qemu_get_sbe32s(f, &env->tlb_per_way);
qemu_get_sbe32s(f, &env->nb_ways);
@@ -71,17 +80,19 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &env->id_tlbs);
qemu_get_sbe32s(f, &env->nb_pids);
if (env->tlb.tlb6) {
- // XXX assumes 6xx
+ /* XXX assumes 6xx */
for (i = 0; i < env->nb_tlb; i++) {
qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
}
}
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
qemu_get_betls(f, &env->pb[i]);
- for (i = 0; i < 1024; i++)
+ }
+ for (i = 0; i < 1024; i++) {
qemu_get_betls(f, &env->spr[i]);
+ }
if (!cpu->vhyp) {
ppc_store_sdr1(env, sdr1);
}
@@ -94,8 +105,9 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_sbe32s(f, &env->error_code);
qemu_get_be32s(f, &env->pending_interrupts);
qemu_get_be32s(f, &env->irq_input_state);
- for (i = 0; i < POWERPC_EXCP_NB; i++)
+ for (i = 0; i < POWERPC_EXCP_NB; i++) {
qemu_get_betls(f, &env->excp_vectors[i]);
+ }
qemu_get_betls(f, &env->excp_prefix);
qemu_get_betls(f, &env->ivor_mask);
qemu_get_betls(f, &env->ivpr_mask);
@@ -253,22 +265,24 @@ static int cpu_pre_save(void *opaque)
env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
- env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
- env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
- env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
- env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
+ env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
+ env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
+ env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
+ env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
}
- for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
- env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
- env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
- env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
- env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
+ for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
+ env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
+ env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
+ env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
+ env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
}
/* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
if (cpu->pre_2_8_migration) {
- /* Mask out bits that got added to msr_mask since the versions
- * which stupidly included it in the migration stream. */
+ /*
+ * Mask out bits that got added to msr_mask since the versions
+ * which stupidly included it in the migration stream.
+ */
target_ulong metamask = 0
#if defined(TARGET_PPC64)
| (1ULL << MSR_TS0)
@@ -277,9 +291,10 @@ static int cpu_pre_save(void *opaque)
;
cpu->mig_msr_mask = env->msr_mask & ~metamask;
cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
- /* CPU models supported by old machines all have PPC_MEM_TLBIE,
- * so we set it unconditionally to allow backward migration from
- * a POWER9 host to a POWER8 host.
+ /*
+ * CPU models supported by old machines all have
+ * PPC_MEM_TLBIE, so we set it unconditionally to allow
+ * backward migration from a POWER9 host to a POWER8 host.
*/
cpu->mig_insns_flags |= PPC_MEM_TLBIE;
cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
@@ -379,23 +394,26 @@ static int cpu_post_load(void *opaque, int version_id)
env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
- env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
- env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
- env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
- env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
+ env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
+ env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
+ env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
+ env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
}
- for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
- env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
- env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
- env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
- env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
+ for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
+ env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
+ env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
+ env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
+ env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
}
if (!cpu->vhyp) {
ppc_store_sdr1(env, env->spr[SPR_SDR1]);
}
- /* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before restoring */
+ /*
+ * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
+ * before restoring
+ */
msr = env->msr;
env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
ppc_store_msr(env, msr);
@@ -409,7 +427,7 @@ static bool fpu_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
- return (cpu->env.insns_flags & PPC_FLOAT);
+ return cpu->env.insns_flags & PPC_FLOAT;
}
static const VMStateDescription vmstate_fpu = {
@@ -428,7 +446,7 @@ static bool altivec_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
- return (cpu->env.insns_flags & PPC_ALTIVEC);
+ return cpu->env.insns_flags & PPC_ALTIVEC;
}
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
@@ -483,7 +501,7 @@ static bool vsx_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
- return (cpu->env.insns_flags2 & PPC2_VSX);
+ return cpu->env.insns_flags2 & PPC2_VSX;
}
static const VMStateDescription vmstate_vsx = {
@@ -591,7 +609,7 @@ static bool slb_needed(void *opaque)
PowerPCCPU *cpu = opaque;
/* We don't support any of the old segment table based 64-bit CPUs */
- return (cpu->env.mmu_model & POWERPC_MMU_64);
+ return cpu->env.mmu_model & POWERPC_MMU_64;
}
static int slb_post_load(void *opaque, int version_id)
@@ -600,8 +618,10 @@ static int slb_post_load(void *opaque, int version_id)
CPUPPCState *env = &cpu->env;
int i;
- /* We've pulled in the raw esid and vsid values from the migration
- * stream, but we need to recompute the page size pointers */
+ /*
+ * We've pulled in the raw esid and vsid values from the migration
+ * stream, but we need to recompute the page size pointers
+ */
for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
/* Migration source had bad values in its SLB */
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 9c5a685..5b0f9ee 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -27,7 +27,7 @@
#include "internal.h"
#include "qemu/atomic128.h"
-//#define DEBUG_OP
+/* #define DEBUG_OP */
static inline bool needs_byteswap(const CPUPPCState *env)
{
@@ -103,10 +103,11 @@ void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
do_lsw(env, addr, nb, reg, GETPC());
}
-/* PPC32 specification says we must generate an exception if
- * rA is in the range of registers to be loaded.
- * In an other hand, IBM says this is valid, but rA won't be loaded.
- * For now, I'll follow the spec...
+/*
+ * PPC32 specification says we must generate an exception if rA is in
+ * the range of registers to be loaded. On the other hand, IBM says
+ * this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
*/
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
uint32_t ra, uint32_t rb)
@@ -199,7 +200,8 @@ void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
addr &= ~(env->dcache_line_size - 1);
- /* Invalidate one cache line :
+ /*
+ * Invalidate one cache line:
* PowerPC specification says this is to be treated like a load
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
@@ -346,17 +348,19 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
#define LO_IDX 0
#endif
-/* We use msr_le to determine index ordering in a vector. However,
- byteswapping is not simply controlled by msr_le. We also need to take
- into account endianness of the target. This is done for the little-endian
- PPC64 user-mode target. */
+/*
+ * We use msr_le to determine index ordering in a vector. However,
+ * byteswapping is not simply controlled by msr_le. We also need to
+ * take into account endianness of the target. This is done for the
+ * little-endian PPC64 user-mode target.
+ */
#define LVE(name, access, swap, element) \
void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
target_ulong addr) \
{ \
size_t n_elems = ARRAY_SIZE(r->element); \
- int adjust = HI_IDX*(n_elems - 1); \
+ int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
if (msr_le) { \
@@ -476,12 +480,13 @@ VSX_STXVL(stxvll, 1)
void helper_tbegin(CPUPPCState *env)
{
- /* As a degenerate implementation, always fail tbegin. The reason
+ /*
+ * As a degenerate implementation, always fail tbegin. The reason
* given is "Nesting overflow". The "persistent" bit is set,
* providing a hint to the error handler to not retry. The TFIAR
* captures the address of the failure, which is this tbegin
- * instruction. Instruction execution will continue with the
- * next instruction in memory, which is precisely what we want.
+ * instruction. Instruction execution will continue with the next
+ * instruction in memory, which is precisely what we want.
*/
env->spr[SPR_TEXASR] =
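
The LVE comment above is about element index selection: the low address bits pick a vector element, and a little-endian guest sees the mirrored index. A standalone sketch of that arithmetic, assuming 16-byte vectors as in ppc_avr_t; vec_elem_index() is an illustrative helper, not a QEMU function:

#include <assert.h>
#include <stddef.h>

/* Index of the element of size elem_size touched by a load at addr
 * within a 16-byte vector, mirrored for a little-endian guest. */
static size_t vec_elem_index(unsigned long addr, size_t elem_size,
                             int guest_le)
{
    size_t n_elems = 16 / elem_size;
    size_t index = (addr & 0xf) / elem_size;

    return guest_le ? n_elems - index - 1 : index;
}

int main(void)
{
    assert(vec_elem_index(0x1004, 4, 0) == 1);  /* big-endian view */
    assert(vec_elem_index(0x1004, 4, 1) == 2);  /* mirrored for LE */
    return 0;
}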
diff --git a/target/ppc/mfrom_table.inc.c b/target/ppc/mfrom_table.inc.c
index 6a1fa37..1653b97 100644
--- a/target/ppc/mfrom_table.inc.c
+++ b/target/ppc/mfrom_table.inc.c
@@ -1,5 +1,4 @@
-static const uint8_t mfrom_ROM_table[602] =
-{
+static const uint8_t mfrom_ROM_table[602] = {
77, 77, 76, 76, 75, 75, 74, 74,
73, 73, 72, 72, 71, 71, 70, 70,
69, 69, 68, 68, 68, 67, 67, 66,
diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c
index 6317918..f96c426 100644
--- a/target/ppc/mfrom_table_gen.c
+++ b/target/ppc/mfrom_table_gen.c
@@ -2,7 +2,7 @@
#include "qemu/osdep.h"
#include <math.h>
-int main (void)
+int main(void)
{
double d;
uint8_t n;
@@ -10,7 +10,8 @@ int main (void)
printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
for (i = 0; i < 602; i++) {
- /* Extremely decomposed:
+ /*
+ * Extremely decomposed:
* T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
*/
@@ -23,8 +24,9 @@ int main (void)
d += 0.5;
n = d;
printf("%3d, ", n);
- if ((i & 7) == 7)
+ if ((i & 7) == 7) {
printf("\n ");
+ }
}
printf("\n};\n");
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index c65d1ad..0a81e98 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -210,10 +210,11 @@ void ppc_store_msr(CPUPPCState *env, target_ulong value)
hreg_store_msr(env, value, 0);
}
-/* This code is lifted from MacOnLinux. It is called whenever
- * THRM1,2 or 3 is read an fixes up the values in such a way
- * that will make MacOS not hang. These registers exist on some
- * 75x and 74xx processors.
+/*
+ * This code is lifted from MacOnLinux. It is called whenever THRM1,
+ * 2 or 3 is read and fixes up the values in such a way that MacOS
+ * will not hang. These registers exist on some 75x and 74xx
+ * processors.
*/
void helper_fixup_thrm(CPUPPCState *env)
{
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index e8562a7..55cf156 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -27,7 +27,7 @@
#include "mmu-hash32.h"
#include "exec/log.h"
-//#define DEBUG_BAT
+/* #define DEBUG_BAT */
#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@@ -228,8 +228,10 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
- /* Memory-forced I/O controller interface access */
- /* If T=1 and BUID=x'07F', the 601 performs a memory access
+ /*
+ * Memory-forced I/O controller interface access
+ *
+ * If T=1 and BUID=x'07F', the 601 performs a memory access
* to SR[28-31] LA[4-31], bypassing all protection mechanisms.
*/
*raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
@@ -265,9 +267,11 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
}
return 1;
case ACCESS_CACHE:
- /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
- /* Should make the instruction do no-op.
- * As it already do no-op, it's quite easy :-)
+ /*
+ * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+ *
+ * This should make the instruction a no-op. As it already does
+ * nothing, it's quite easy :-)
*/
*raddr = eaddr;
return 0;
@@ -341,6 +345,24 @@ static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
return -1;
}
+static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+ hwaddr offset = pte_offset + 6;
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+ hwaddr offset = pte_offset + 7;
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
target_ulong sr, target_ulong eaddr,
ppc_hash_pte32_t *pte)
@@ -399,7 +421,6 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
hwaddr pte_offset;
ppc_hash_pte32_t pte;
int prot;
- uint32_t new_pte1;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
hwaddr raddr;
@@ -515,18 +536,20 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* 8. Update PTE referenced and changed bits if necessary */
- new_pte1 = pte.pte1 | HPTE32_R_R; /* set referenced bit */
- if (rwx == 1) {
- new_pte1 |= HPTE32_R_C; /* set changed (dirty) bit */
- } else {
- /* Treat the page as read-only for now, so that a later write
- * will pass through this function again to set the C bit */
- prot &= ~PAGE_WRITE;
- }
-
- if (new_pte1 != pte.pte1) {
- ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
+ if (!(pte.pte1 & HPTE32_R_R)) {
+ ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
}
+ if (!(pte.pte1 & HPTE32_R_C)) {
+ if (rwx == 1) {
+ ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
+ } else {
+ /*
+ * Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit
+ */
+ prot &= ~PAGE_WRITE;
+ }
+ }
/* 9. Determine the real address from the PTE */
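
The new ppc_hash32_set_r()/ppc_hash32_set_c() helpers above replace the pte1 read-modify-write with single byte stores. The +6/+7 offsets follow from the big-endian layout of the 8-byte PTE: pte1 occupies bytes 4..7, so the R bit (0x100) lives in byte 6 and the C bit (0x80) in byte 7. A worked check of that arithmetic; be32_byte_of() is illustrative, not QEMU code:

#include <assert.h>

/* Big-endian byte index (0 = most significant) holding the highest
 * set bit of mask, within the 32-bit word starting at word_off. */
static int be32_byte_of(unsigned int mask, int word_off)
{
    int bit = 0;

    while (mask >> (bit + 1)) {
        bit++;
    }
    return word_off + (31 - bit) / 8;
}

int main(void)
{
    assert(be32_byte_of(0x100, 4) == 6);  /* HPTE32_R_R -> pte_offset + 6 */
    assert(be32_byte_of(0x080, 4) == 7);  /* HPTE32_R_C -> pte_offset + 7 */
    return 0;
}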
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 214149f..7899eb2 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -30,7 +30,7 @@
#include "hw/hw.h"
#include "mmu-book3s-v3.h"
-//#define DEBUG_SLB
+/* #define DEBUG_SLB */
#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
@@ -58,9 +58,11 @@ static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
LOG_SLB("%s: slot %d %016" PRIx64 " %016"
PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
- /* We check for 1T matches on all MMUs here - if the MMU
+ /*
+ * We check for 1T matches on all MMUs here - if the MMU
* doesn't have 1T segment support, we will have prevented 1T
- * entries from being inserted in the slbmte code. */
+ * entries from being inserted in the slbmte code.
+ */
if (((slb->esid == esid_256M) &&
((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
|| ((slb->esid == esid_1T) &&
@@ -103,7 +105,8 @@ void helper_slbia(CPUPPCState *env)
if (slb->esid & SLB_ESID_V) {
slb->esid &= ~SLB_ESID_V;
- /* XXX: given the fact that segment size is 256 MB or 1TB,
+ /*
+ * XXX: given the fact that segment size is 256 MB or 1TB,
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
@@ -126,7 +129,8 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr,
if (slb->esid & SLB_ESID_V) {
slb->esid &= ~SLB_ESID_V;
- /* XXX: given the fact that segment size is 256 MB or 1TB,
+ /*
+ * XXX: given the fact that segment size is 256 MB or 1TB,
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
@@ -306,8 +310,10 @@ static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
{
CPUPPCState *env = &cpu->env;
unsigned pp, key;
- /* Some pp bit combinations have undefined behaviour, so default
- * to no access in those cases */
+ /*
+ * Some pp bit combinations have undefined behaviour, so default
+ * to no access in those cases
+ */
int prot = 0;
key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
@@ -376,7 +382,7 @@ static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
}
key = HPTE64_R_KEY(pte.pte1);
- amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;
+ amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
/* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
/* env->spr[SPR_AMR]); */
@@ -547,8 +553,9 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
if (*pshift == 0) {
continue;
}
- /* We don't do anything with pshift yet as qemu TLB only deals
- * with 4K pages anyway
+ /*
+ * We don't do anything with pshift yet as qemu TLB only
+ * deals with 4K pages anyway
*/
pte->pte0 = pte0;
pte->pte1 = pte1;
@@ -572,8 +579,10 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
uint64_t vsid, epnmask, epn, ptem;
const PPCHash64SegmentPageSizes *sps = slb->sps;
- /* The SLB store path should prevent any bad page size encodings
- * getting in there, so: */
+ /*
+ * The SLB store path should prevent any bad page size encodings
+ * getting in there, so:
+ */
assert(sps);
/* If ISL is set in LPCR we need to clamp the page size to 4K */
@@ -716,6 +725,39 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
}
+static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
+ return;
+ }
+ base = ppc_hash64_hpt_base(cpu);
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
+ return;
+ }
+ base = ppc_hash64_hpt_base(cpu);
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
int rwx, int mmu_idx)
{
@@ -726,23 +768,25 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
hwaddr ptex;
ppc_hash_pte64_t pte;
int exec_prot, pp_prot, amr_prot, prot;
- uint64_t new_pte1;
const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
hwaddr raddr;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
- /* Note on LPCR usage: 970 uses HID4, but our special variant
- * of store_spr copies relevant fields into env->spr[SPR_LPCR].
- * Similarily we filter unimplemented bits when storing into
- * LPCR depending on the MMU version. This code can thus just
- * use the LPCR "as-is".
+ /*
+ * Note on LPCR usage: 970 uses HID4, but our special variant of
+ * store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * Similarly we filter unimplemented bits when storing into LPCR
+ * depending on the MMU version. This code can thus just use the
+ * LPCR "as-is".
*/
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
- /* Translation is supposedly "off" */
- /* In real mode the top 4 effective address bits are (mostly) ignored */
+ /*
+ * Translation is supposedly "off", but in real mode the top 4
+ * effective address bits are (mostly) ignored
+ */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
/* In HV mode, add HRMOR if top EA bit is clear */
@@ -871,17 +915,19 @@ skip_slb_search:
/* 6. Update PTE referenced and changed bits if necessary */
- new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
- if (rwx == 1) {
- new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
- } else {
- /* Treat the page as read-only for now, so that a later write
- * will pass through this function again to set the C bit */
- prot &= ~PAGE_WRITE;
+ if (!(pte.pte1 & HPTE64_R_R)) {
+ ppc_hash64_set_r(cpu, ptex, pte.pte1);
}
-
- if (new_pte1 != pte.pte1) {
- ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
+ if (!(pte.pte1 & HPTE64_R_C)) {
+ if (rwx == 1) {
+ ppc_hash64_set_c(cpu, ptex, pte.pte1);
+ } else {
+ /*
+ * Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit
+ */
+ prot &= ~PAGE_WRITE;
+ }
}
/* 7. Determine the real address from the PTE */
@@ -940,24 +986,6 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
& TARGET_PAGE_MASK;
}
-void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
- uint64_t pte0, uint64_t pte1)
-{
- hwaddr base;
- hwaddr offset = ptex * HASH_PTE_SIZE_64;
-
- if (cpu->vhyp) {
- PPCVirtualHypervisorClass *vhc =
- PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
- return;
- }
- base = ppc_hash64_hpt_base(cpu);
-
- stq_phys(CPU(cpu)->as, base + offset, pte0);
- stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
-}
-
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
target_ulong pte0, target_ulong pte1)
{
@@ -1023,8 +1051,9 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
return;
}
- /* Make one up. Mostly ignore the ESID which will not be
- * needed for translation
+ /*
+ * Make one up. Mostly ignore the ESID which will not be needed
+ * for translation
*/
vsid = SLB_VSID_VRMA;
vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
@@ -1080,11 +1109,12 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
}
env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
- /* XXX We could also write LPID from HID4 here
+ /*
+ * XXX We could also write LPID from HID4 here
* but since we don't tag any translation on it
* it doesn't actually matter
- */
- /* XXX For proper emulation of 970 we also need
+ *
+ * XXX For proper emulation of 970 we also need
* to dig HRMOR out of HID5
*/
break;
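
The 64-bit R/C helpers above use the same single-byte update trick; here pte1 is the big-endian doubleword at bytes 8..15 of the 16-byte HPTE, which puts the R bit (bits 15..8 of pte1, stored as (pte1 >> 8) & 0xff) in byte 14 and the C bit (bits 7..0) in byte 15. A worked check under that layout assumption; be64_byte_of() is illustrative:

#include <assert.h>

/* Big-endian byte index (0 = most significant) of bit 'bit' within
 * the 64-bit doubleword starting at byte dw_off of the HPTE. */
static int be64_byte_of(int bit, int dw_off)
{
    return dw_off + (63 - bit) / 8;
}

int main(void)
{
    assert(be64_byte_of(8, 8) == 14);   /* HPTE64_R_R (0x100) */
    assert(be64_byte_of(7, 8) == 15);   /* HPTE64_R_C (0x80)  */
    return 0;
}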
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
index 5be7ad8..87729d4 100644
--- a/target/ppc/mmu-hash64.h
+++ b/target/ppc/mmu-hash64.h
@@ -10,8 +10,6 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
int mmu_idx);
-void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
- uint64_t pte0, uint64_t pte1);
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index ca1fb26..066e324 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -228,10 +228,10 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
ppc_v3_pate_t pate;
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
- assert(ppc64_use_proc_tbl(cpu));
- /* Real Mode Access */
- if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+ /* HV or virtual hypervisor Real Mode Access */
+ if ((msr_hv || cpu->vhyp) &&
+ (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) {
/* In real mode top 4 effective addr bits (mostly) ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
@@ -241,6 +241,16 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
return 0;
}
+ /*
+ * Check UPRT (we avoid the check in real mode to deal with
+ * transitional states during kexec).
+ */
+ if (!ppc64_use_proc_tbl(cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "LPCR:UPRT not set in radix mode ! LPCR="
+ TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
+ }
+
/* Virtual Mode Access - get the fully qualified address */
if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
ppc_radix64_raise_segi(cpu, rwx, eaddr);
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index ab72473..1dbc9ac 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -33,11 +33,11 @@
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
-//#define DEBUG_MMU
-//#define DEBUG_BATS
-//#define DEBUG_SOFTWARE_TLB
-//#define DUMP_PAGE_TABLES
-//#define FLUSH_ALL_TLBS
+/* #define DEBUG_MMU */
+/* #define DEBUG_BATS */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DUMP_PAGE_TABLES */
+/* #define FLUSH_ALL_TLBS */
#ifdef DEBUG_MMU
# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
@@ -152,7 +152,8 @@ static int check_prot(int prot, int rw, int access_type)
}
static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
- target_ulong pte1, int h, int rw, int type)
+ target_ulong pte1, int h,
+ int rw, int type)
{
target_ulong ptem, mmask;
int access, ret, pteh, ptev, pp;
@@ -332,7 +333,8 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
- switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
+ switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
+ 0, rw, access_type)) {
case -3:
/* TLB inconsistency */
return -1;
@@ -347,9 +349,11 @@ static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
break;
case 0:
/* access granted */
- /* XXX: we should go on looping to check all TLBs consistency
- * but we can speed-up the whole thing as the
- * result would be undefined if TLBs are not consistent.
+ /*
+ * XXX: we should keep looping to check the consistency
+ * of all TLBs, but we can speed up the whole thing since
+ * the result would be undefined if the TLBs are not
+ * consistent.
*/
ret = 0;
best = nr;
@@ -550,14 +554,18 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
/* Direct-store segment : absolutely *BUGGY* for now */
- /* Direct-store implies a 32-bit MMU.
+ /*
+ * Direct-store implies a 32-bit MMU.
* Check the Segment Register's bus unit ID (BUID).
*/
sr = env->sr[eaddr >> 28];
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
- /* Memory-forced I/O controller interface access */
- /* If T=1 and BUID=x'07F', the 601 performs a memory access
- * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+ /*
+ * Memory-forced I/O controller interface access
+ *
+ * If T=1 and BUID=x'07F', the 601 performs a memory
+ * access to SR[28-31] LA[4-31], bypassing all protection
+ * mechanisms.
*/
ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -578,9 +586,11 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* lwarx, ldarx or srwcx. */
return -4;
case ACCESS_CACHE:
- /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
- /* Should make the instruction do no-op.
- * As it already do no-op, it's quite easy :-)
+ /*
+ * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+ *
+ * This should make the instruction a no-op. As it already does
+ * nothing, it's quite easy :-)
*/
ctx->raddr = eaddr;
return 0;
@@ -942,12 +952,14 @@ static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
return esr;
}
-/* Get EPID register given the mmu_idx. If this is regular load,
- * construct the EPID access bits from current processor state */
-
-/* Get the effective AS and PR bits and the PID. The PID is returned only if
- * EPID load is requested, otherwise the caller must detect the correct EPID.
- * Return true if valid EPID is returned. */
+/*
+ * Get the EPID register given the mmu_idx. If this is a regular
+ * load, construct the EPID access bits from the current processor
+ * state.
+ *
+ * Get the effective AS and PR bits and the PID. The PID is returned
+ * only if an EPID load is requested, otherwise the caller must
+ * detect the correct EPID. Returns true if a valid EPID is returned.
+ */
static bool mmubooke206_get_as(CPUPPCState *env,
int mmu_idx, uint32_t *epid_out,
bool *as_out, bool *pr_out)
@@ -1369,8 +1381,9 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
case POWERPC_MMU_SOFT_4xx_Z:
if (unlikely(msr_pe != 0)) {
- /* 403 family add some particular protections,
- * using PBL/PBU registers for accesses with no translation.
+ /*
+ * The 403 family adds some particular protections, using
+ * PBL/PBU registers for accesses with no translation.
*/
in_plb =
/* Check PLB validity */
@@ -1453,7 +1466,8 @@ static int get_physical_address_wtlb(
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
} else {
- cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
+ cpu_abort(CPU(cpu),
+ "PowerPC in real mode do not do any translation\n");
}
return -1;
default:
@@ -1498,9 +1512,10 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
- /* Some MMUs have separate TLBs for code and data. If we only try an
- * ACCESS_INT, we may not be able to read instructions mapped by code
- * TLBs, so we also try a ACCESS_CODE.
+ /*
+ * Some MMUs have separate TLBs for code and data. If we only
+ * try an ACCESS_INT, we may not be able to read instructions
+ * mapped by code TLBs, so we also try an ACCESS_CODE.
*/
if (unlikely(get_physical_address(env, &ctx, addr, 0,
ACCESS_CODE) != 0)) {
@@ -1805,6 +1820,13 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
base = BATu & ~0x0001FFFF;
end = base + mask + 0x00020000;
+ if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
+ /* Flushing 1024 4K pages is slower than a complete flush */
+ LOG_BATS("Flush all BATs\n");
+ tlb_flush(CPU(cs));
+ LOG_BATS("Flush done\n");
+ return;
+ }
LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
TARGET_FMT_lx ")\n", base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
@@ -1834,8 +1856,9 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
@@ -1865,8 +1888,9 @@ void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
dump_store_bat(env, 'D', 0, nr, value);
if (env->DBAT[0][nr] != value) {
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
@@ -1913,8 +1937,9 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
do_inval = 1;
#endif
}
- /* When storing valid upper BAT, mask BEPI and BRPN
- * and invalidate all TLBs covered by this BAT
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
*/
env->IBAT[0][nr] = (value & 0x00001FFFUL) |
(value & ~0x0001FFFFUL & ~mask);
@@ -2027,7 +2052,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
/* tlbie invalidate TLBs for all segments */
- /* XXX: given the fact that there are too many segments to invalidate,
+ /*
+ * XXX: given the fact that there are too many segments to invalidate,
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
@@ -2044,10 +2070,11 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
- /* Actual CPUs invalidate entire congruence classes based on the
- * geometry of their TLBs and some OSes take that into account,
- * we just mark the TLB to be flushed later (context synchronizing
- * event or sync instruction on 32-bit).
+ /*
+ * Actual CPUs invalidate entire congruence classes based on
+ * the geometry of their TLBs and some OSes take that into
+ * account, we just mark the TLB to be flushed later (context
+ * synchronizing event or sync instruction on 32-bit).
*/
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
@@ -2152,8 +2179,10 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
#endif
if (env->sr[srnum] != value) {
env->sr[srnum] = value;
-/* Invalidating 256MB of virtual memory in 4kB pages is way longer than
- flusing the whole TLB. */
+ /*
+ * Invalidating 256MB of virtual memory in 4kB pages takes way
+ * longer than flushing the whole TLB.
+ */
#if !defined(FLUSH_ALL_TLBS) && 0
{
target_ulong page, end;
@@ -2264,10 +2293,12 @@ target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
int nb_BATs;
target_ulong ret = 0;
- /* We don't have to generate many instances of this instruction,
+ /*
+ * We don't have to generate many instances of this instruction,
* as rac is supervisor only.
+ *
+ * XXX: FIX THIS: Pretend we have no BAT
*/
- /* XXX: FIX THIS: Pretend we have no BAT */
nb_BATs = env->nb_BATs;
env->nb_BATs = 0;
if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
@@ -2422,7 +2453,8 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
}
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
& PPC4XX_TLBHI_SIZE_MASK);
- /* We cannot handle TLB size < TARGET_PAGE_SIZE.
+ /*
+ * We cannot handle TLB size < TARGET_PAGE_SIZE.
* If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
*/
if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
@@ -2742,7 +2774,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (tlb->mas1 & MAS1_VALID) {
- /* Invalidate the page in QEMU TLB if it was a valid entry.
+ /*
+ * Invalidate the page in QEMU TLB if it was a valid entry.
*
* In "PowerPC e500 Core Family Reference Manual, Rev. 1",
* Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
@@ -2751,7 +2784,8 @@ void helper_booke206_tlbwe(CPUPPCState *env)
* "Note that when an L2 TLB entry is written, it may be displacing an
* already valid entry in the same L2 TLB location (a victim). If a
* valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
- * TLB entry is automatically invalidated." */
+ * TLB entry is automatically invalidated."
+ */
flush_page(env, tlb);
}
@@ -2777,8 +2811,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
if (!msr_cm) {
- /* Executing a tlbwe instruction in 32-bit mode will set
- * bits 0:31 of the TLB EPN field to zero.
+ /*
+ * Executing a tlbwe instruction in 32-bit mode will set bits
+ * 0:31 of the TLB EPN field to zero.
*/
mask &= 0xffffffff;
}
@@ -3022,10 +3057,13 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
/*****************************************************************************/
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
+/*
+ * Try to fill the TLB and raise an exception on error. If retaddr is
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ *
+ * XXX: fix it to restore all registers
+ */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c
index 42e5816..ee9d6e8 100644
--- a/target/ppc/monitor.c
+++ b/target/ppc/monitor.c
@@ -27,32 +27,33 @@
#include "monitor/hmp-target.h"
#include "hmp.h"
-static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
+static target_long monitor_get_ccr(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
unsigned int u;
int i;
u = 0;
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++) {
u |= env->crf[i] << (32 - (4 * (i + 1)));
+ }
return u;
}
-static target_long monitor_get_decr (const struct MonitorDef *md, int val)
+static target_long monitor_get_decr(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_decr(env);
}
-static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
+static target_long monitor_get_tbu(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_tbu(env);
}
-static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
+static target_long monitor_get_tbl(const struct MonitorDef *md, int val)
{
CPUArchState *env = mon_get_cpu_env();
return cpu_ppc_load_tbl(env);
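
The getter above rebuilds the 32-bit CR image from the eight per-field crf[] nibbles. A minimal standalone sketch of the same packing, assuming plain uint8_t fields rather than the CPUArchState accessors:

#include <stdint.h>

/* Sketch: pack eight 4-bit CR fields into one 32-bit image, with CR0
 * in the most-significant nibble, mirroring monitor_get_ccr() above. */
static uint32_t pack_cr(const uint8_t crf[8])
{
    uint32_t u = 0;
    int i;

    for (i = 0; i < 8; i++) {
        u |= (uint32_t)(crf[i] & 0xf) << (32 - 4 * (i + 1));
    }
    return u;
}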
diff --git a/target/ppc/trace-events b/target/ppc/trace-events
index 3858f97..7b3cfe1 100644
--- a/target/ppc/trace-events
+++ b/target/ppc/trace-events
@@ -1,5 +1,30 @@
# See docs/devel/tracing.txt for syntax documentation.
# kvm.c
-kvm_failed_spr_set(int str, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
-kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
+kvm_failed_spr_set(int spr, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
+kvm_failed_spr_get(int spr, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
+kvm_failed_fpscr_set(const char *msg) "Unable to set FPSCR to KVM: %s"
+kvm_failed_fp_set(const char *fpname, int fpnum, const char *msg) "Unable to set %s%d to KVM: %s"
+kvm_failed_vscr_set(const char *msg) "Unable to set VSCR to KVM: %s"
+kvm_failed_vr_set(int vr, const char *msg) "Unable to set VR%d to KVM: %s"
+kvm_failed_fpscr_get(const char *msg) "Unable to get FPSCR from KVM: %s"
+kvm_failed_fp_get(const char *fpname, int fpnum, const char *msg) "Unable to get %s%d from KVM: %s"
+kvm_failed_vscr_get(const char *msg) "Unable to get VSCR from KVM: %s"
+kvm_failed_vr_get(int vr, const char *msg) "Unable to get VR%d from KVM: %s"
+kvm_failed_vpa_addr_get(const char *msg) "Unable to get VPA address from KVM: %s"
+kvm_failed_slb_get(const char *msg) "Unable to get SLB shadow state from KVM: %s"
+kvm_failed_dtl_get(const char *msg) "Unable to get dispatch trace log state from KVM: %s"
+kvm_failed_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_slb_set(const char *msg) "Unable to set SLB shadow state to KVM: %s"
+kvm_failed_dtl_set(const char *msg) "Unable to set dispatch trace log state to KVM: %s"
+kvm_failed_null_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_put_vpa(void) "Warning: Unable to set VPA information to KVM"
+kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM"
+kvm_injected_interrupt(int irq) "injected interrupt %d"
+kvm_handle_dcr_write(void) "handle dcr write"
+kvm_handle_dcr_read(void) "handle dcr read"
+kvm_handle_halt(void) "handle halt"
+kvm_handle_papr_hcall(void) "handle PAPR hypercall"
+kvm_handle_epr(void) "handle epr"
+kvm_handle_watchdog_expiry(void) "handle watchdog expiry"
+kvm_handle_debug_exception(void) "handle debug exception"
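
Each line above declares a trace point; QEMU's tracetool turns each declaration into a matching trace_<name>() helper that kvm.c calls in place of the old DPRINTFs. A hedged sketch of a call site (report_spr_error is a hypothetical wrapper, not part of this patch):

#include <errno.h>
#include <string.h>
#include "trace.h"   /* generated trace_*() helpers */

/* Hypothetical helper showing the intended usage of the events above */
static void report_spr_error(int spr)
{
    trace_kvm_failed_spr_get(spr, strerror(errno));
}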
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 93d77a2..c280e0d 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -42,8 +42,8 @@
#define GDBSTUB_SINGLE_STEP 0x4
/* Include definitions for instructions classes and implementations flags */
-//#define PPC_DEBUG_DISAS
-//#define DO_PPC_STATISTICS
+/* #define PPC_DEBUG_DISAS */
+/* #define DO_PPC_STATISTICS */
#ifdef PPC_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
@@ -54,9 +54,9 @@
/* Code translation helpers */
/* global register indexes */
-static char cpu_reg_names[10*3 + 22*4 /* GPR */
- + 10*4 + 22*5 /* SPE GPRh */
- + 8*5 /* CRF */];
+static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
+ + 10 * 4 + 22 * 5 /* SPE GPRh */
+ + 8 * 5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i32 cpu_crf[8];
@@ -78,7 +78,7 @@ static TCGv_i32 cpu_access_type;
void ppc_translate_init(void)
{
int i;
- char* p;
+ char *p;
size_t cpu_reg_names_size;
p = cpu_reg_names;
@@ -146,7 +146,8 @@ void ppc_translate_init(void)
offsetof(CPUPPCState, fpscr), "fpscr");
cpu_access_type = tcg_global_mem_new_i32(cpu_env,
- offsetof(CPUPPCState, access_type), "access_type");
+ offsetof(CPUPPCState, access_type),
+ "access_type");
}
/* internal defines */
@@ -246,8 +247,9 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
{
TCGv_i32 t0, t1;
- /* These are all synchronous exceptions, we set the PC back to
- * the faulting instruction
+ /*
+ * These are all synchronous exceptions; we set the PC back to the
+ * faulting instruction
*/
if (ctx->exception == POWERPC_EXCP_NONE) {
gen_update_nip(ctx, ctx->base.pc_next - 4);
@@ -264,8 +266,9 @@ static void gen_exception(DisasContext *ctx, uint32_t excp)
{
TCGv_i32 t0;
- /* These are all synchronous exceptions, we set the PC back to
- * the faulting instruction
+ /*
+ * These are all synchronous exceptions; we set the PC back to the
+ * faulting instruction
*/
if (ctx->exception == POWERPC_EXCP_NONE) {
gen_update_nip(ctx, ctx->base.pc_next - 4);
@@ -320,8 +323,9 @@ static void gen_debug_exception(DisasContext *ctx)
{
TCGv_i32 t0;
- /* These are all synchronous exceptions, we set the PC back to
- * the faulting instruction
+ /*
+ * These are all synchronous exceptions; we set the PC back to the
+ * faulting instruction
*/
if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
(ctx->exception != POWERPC_EXCP_SYNC)) {
@@ -602,9 +606,11 @@ static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
tcg_gen_movi_tl(t0, CRF_EQ);
tcg_gen_movi_tl(t1, CRF_LT);
- tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1, t1, t0);
+ tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
+ t0, arg0, arg1, t1, t0);
tcg_gen_movi_tl(t1, CRF_GT);
- tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1, t1, t0);
+ tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
+ t0, arg0, arg1, t1, t0);
tcg_gen_trunc_tl_i32(t, t0);
tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
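
The two movcond operations above form a branchless three-way compare whose result is then merged with SO. A scalar sketch, using the architected CR-field bits (LT=8, GT=4, EQ=2, SO=1):

#include <stdint.h>

/* Sketch of gen_op_cmp() as plain C: pick LT/GT/EQ, then merge SO */
static uint32_t cmp_crf(int64_t a, int64_t b, uint32_t so)
{
    uint32_t c = (a < b) ? 8 : (a > b) ? 4 : 2;

    return c | (so & 1);
}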
@@ -840,9 +846,11 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
if (compute_ca) {
if (NARROW_MODE(ctx)) {
- /* Caution: a non-obvious corner case of the spec is that we
- must produce the *entire* 64-bit addition, but produce the
- carry into bit 32. */
+ /*
+ * Caution: a non-obvious corner case of the spec is that
+ * we must produce the *entire* 64-bit addition, but
+ * produce the carry into bit 32.
+ */
TCGv t1 = tcg_temp_new();
tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */
tcg_gen_add_tl(t0, arg1, arg2);
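
The xor/add pair above relies on the full-adder identity sum = a ^ b ^ carry_in, so sum ^ a ^ b recovers the per-bit carry-in vector. A standalone sketch of extracting the carry into bit 32 that narrow mode must report:

#include <stdint.h>

/* Sketch: bit 32 of (sum ^ a ^ b) is the carry into bit 32 */
static int carry_into_bit32(uint64_t a, uint64_t b)
{
    uint64_t sum = a + b;

    return (int)((sum ^ a ^ b) >> 32) & 1;
}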
@@ -1017,12 +1025,13 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, ret);
+ }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
@@ -1091,12 +1100,13 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_temp_free_i64(t2);
tcg_temp_free_i64(t3);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, ret);
+ }
}
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
@@ -1219,8 +1229,9 @@ static void gen_mulhw(DisasContext *ctx)
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mulhwu mulhwu. */
@@ -1235,8 +1246,9 @@ static void gen_mulhwu(DisasContext *ctx)
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mullw mullw. */
@@ -1255,8 +1267,9 @@ static void gen_mullw(DisasContext *ctx)
tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
#endif
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mullwo mullwo. */
@@ -1284,8 +1297,9 @@ static void gen_mullwo(DisasContext *ctx)
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mulli */
@@ -1325,8 +1339,9 @@ static void gen_mulld(DisasContext *ctx)
{
tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mulldo mulldo. */
@@ -1369,9 +1384,11 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
if (compute_ca) {
/* dest = ~arg1 + arg2 [+ ca]. */
if (NARROW_MODE(ctx)) {
- /* Caution: a non-obvious corner case of the spec is that we
- must produce the *entire* 64-bit addition, but produce the
- carry into bit 32. */
+ /*
+ * Caution: a non-obvious corner case of the spec is that
+ * we must produce the *entire* 64-bit addition, but
+ * produce the carry into bit 32.
+ */
TCGv inv1 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
@@ -1404,8 +1421,10 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
}
} else if (add_ca) {
- /* Since we're ignoring carry-out, we can simplify the
- standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
+ /*
+ * Since we're ignoring carry-out, we can simplify the
+ * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
+ */
tcg_gen_sub_tl(t0, arg2, arg1);
tcg_gen_add_tl(t0, t0, cpu_ca);
tcg_gen_subi_tl(t0, t0, 1);
@@ -1493,7 +1512,7 @@ static void gen_nego(DisasContext *ctx)
/*** Integer logical ***/
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
cpu_gpr[rB(ctx->opcode)]); \
@@ -1502,7 +1521,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_LOGICAL1(name, tcg_op, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
if (unlikely(Rc(ctx->opcode) != 0)) \
@@ -1517,14 +1536,16 @@ GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
/* andi. */
static void gen_andi_(DisasContext *ctx)
{
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ UIMM(ctx->opcode));
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
/* andis. */
static void gen_andis_(DisasContext *ctx)
{
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ UIMM(ctx->opcode) << 16);
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1538,8 +1559,9 @@ static void gen_cntlzw(DisasContext *ctx)
tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
tcg_temp_free_i32(t);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* cnttzw */
@@ -1591,12 +1613,14 @@ static void gen_or(DisasContext *ctx)
rb = rB(ctx->opcode);
/* Optimisation for mr. ri case */
if (rs != ra || rs != rb) {
- if (rs != rb)
+ if (rs != rb) {
tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
- else
+ } else {
tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[ra]);
+ }
} else if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
@@ -1654,10 +1678,11 @@ static void gen_or(DisasContext *ctx)
tcg_temp_free(t0);
}
#if !defined(CONFIG_USER_ONLY)
- /* Pause out of TCG otherwise spin loops with smt_low eat too much
- * CPU and the kernel hangs. This applies to all encodings other
- * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
- * and all currently undefined.
+ /*
+ * Pause out of TCG, otherwise spin loops with smt_low eat too
+ * much CPU and the kernel hangs. This applies to all
+ * encodings other than no-op, e.g., miso(rs=26), yield(27),
+ * mdoio(29), mdoom(30), and all currently undefined.
*/
gen_pause(ctx);
#endif
@@ -1671,12 +1696,15 @@ GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
static void gen_xor(DisasContext *ctx)
{
/* Optimisation for "set to zero" case */
- if (rS(ctx->opcode) != rB(ctx->opcode))
- tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- else
+ if (rS(ctx->opcode) != rB(ctx->opcode)) {
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ } else {
tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* ori */
@@ -1699,7 +1727,8 @@ static void gen_oris(DisasContext *ctx)
/* NOP */
return;
}
- tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ uimm << 16);
}
/* xori */
@@ -1723,7 +1752,8 @@ static void gen_xoris(DisasContext *ctx)
/* NOP */
return;
}
- tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ uimm << 16);
}
/* popcntb : PowerPC 2.03 specification */
@@ -1798,8 +1828,9 @@ GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
static void gen_cntlzd(DisasContext *ctx)
{
tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* cnttzd */
@@ -1838,7 +1869,7 @@ static void gen_rlwimi(DisasContext *ctx)
uint32_t mb = MB(ctx->opcode);
uint32_t me = ME(ctx->opcode);
- if (sh == (31-me) && mb <= me) {
+ if (sh == (31 - me) && mb <= me) {
tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
} else {
target_ulong mask;
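
For reference, a scalar sketch of rlwimi (rotate left word immediate then mask insert); when sh == 31 - me and mb <= me the rotated field lands exactly at bit sh, which is why the fast path above can use a single deposit:

#include <stdint.h>

/* Sketch: rotate rs left by sh, then insert it into ra under
 * MASK(mb, me), using IBM bit numbering (bit 0 is the MSB, mb <= me). */
static uint32_t rlwimi(uint32_t ra, uint32_t rs, int sh, int mb, int me)
{
    uint32_t r = (rs << sh) | (rs >> ((32 - sh) & 31));
    uint32_t m = (0xffffffffu >> mb) & (0xffffffffu << (31 - me));

    return (r & m) | (ra & ~m);
}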
@@ -2141,8 +2172,9 @@ static void gen_slw(DisasContext *ctx)
tcg_temp_free(t1);
tcg_temp_free(t0);
tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sraw & sraw. */
@@ -2150,8 +2182,9 @@ static void gen_sraw(DisasContext *ctx)
{
gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srawi & srawi. */
@@ -2206,8 +2239,9 @@ static void gen_srw(DisasContext *ctx)
tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t1);
tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
#if defined(TARGET_PPC64)
@@ -2226,8 +2260,9 @@ static void gen_sld(DisasContext *ctx)
tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t1);
tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srad & srad. */
@@ -2235,8 +2270,9 @@ static void gen_srad(DisasContext *ctx)
{
gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
@@ -2317,8 +2353,9 @@ static void gen_srd(DisasContext *ctx)
tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t1);
tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
#endif
@@ -2463,7 +2500,7 @@ GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
#endif
#define GEN_LD(name, ldop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
gen_set_access_type(ctx, ACCESS_INT); \
@@ -2474,7 +2511,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_LDU(name, ldop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
@@ -2494,7 +2531,7 @@ static void glue(gen_, name##u)(DisasContext *ctx)
}
#define GEN_LDUX(name, ldop, opc2, opc3, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0 || \
@@ -2598,8 +2635,9 @@ static void gen_ld(DisasContext *ctx)
/* ld - ldu */
gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
- if (Rc(ctx->opcode))
+ if (Rc(ctx->opcode)) {
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ }
tcg_temp_free(EA);
}
@@ -2669,7 +2707,7 @@ static void gen_lq(DisasContext *ctx)
/*** Integer store ***/
#define GEN_ST(name, stop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
gen_set_access_type(ctx, ACCESS_INT); \
@@ -2680,7 +2718,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_STU(name, stop, opc, type) \
-static void glue(gen_, stop##u)(DisasContext *ctx) \
+static void glue(gen_, stop##u)(DisasContext *ctx) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
@@ -2699,7 +2737,7 @@ static void glue(gen_, stop##u)(DisasContext *ctx)
}
#define GEN_STUX(name, stop, opc2, opc3, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
if (unlikely(rA(ctx->opcode) == 0)) { \
@@ -2847,8 +2885,9 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
- if (Rc(ctx->opcode))
+ if (Rc(ctx->opcode)) {
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ }
tcg_temp_free(EA);
}
}
@@ -2916,10 +2955,11 @@ static void gen_stmw(DisasContext *ctx)
/*** Integer load and store strings ***/
/* lswi */
-/* PowerPC32 specification says we must generate an exception if
- * rA is in the range of registers to be loaded.
- * In an other hand, IBM says this is valid, but rA won't be loaded.
- * For now, I'll follow the spec...
+/*
+ * The PowerPC32 specification says we must generate an exception if rA is
+ * in the range of registers to be loaded. On the other hand, IBM says
+ * this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
*/
static void gen_lswi(DisasContext *ctx)
{
@@ -2934,8 +2974,9 @@ static void gen_lswi(DisasContext *ctx)
gen_align_no_le(ctx);
return;
}
- if (nb == 0)
+ if (nb == 0) {
nb = 32;
+ }
nr = DIV_ROUND_UP(nb, 4);
if (unlikely(lsw_reg_in_range(start, nr, ra))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
@@ -2989,8 +3030,9 @@ static void gen_stswi(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
- if (nb == 0)
+ if (nb == 0) {
nb = 32;
+ }
t1 = tcg_const_i32(nb);
t2 = tcg_const_i32(rS(ctx->opcode));
gen_helper_stsw(cpu_env, t0, t1, t2);
@@ -3363,8 +3405,10 @@ static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop)
gen_set_label(l1);
- /* Address mismatch implies failure. But we still need to provide the
- memory barrier semantics of the instruction. */
+ /*
+ * Address mismatch implies failure. But we still need to provide
+ * the memory barrier semantics of the instruction.
+ */
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
@@ -3639,8 +3683,9 @@ static void gen_rvwinkle(DisasContext *ctx)
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
- if (ctx->has_cfar)
+ if (ctx->has_cfar) {
tcg_gen_movi_tl(cpu_cfar, nip);
+ }
#endif
}
@@ -3732,17 +3777,19 @@ static void gen_bcond(DisasContext *ctx, int type)
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
target = tcg_temp_local_new();
- if (type == BCOND_CTR)
+ if (type == BCOND_CTR) {
tcg_gen_mov_tl(target, cpu_ctr);
- else if (type == BCOND_TAR)
+ } else if (type == BCOND_TAR) {
gen_load_spr(target, SPR_TAR);
- else
+ } else {
tcg_gen_mov_tl(target, cpu_lr);
+ }
} else {
target = NULL;
}
- if (LK(ctx->opcode))
+ if (LK(ctx->opcode)) {
gen_setlr(ctx, ctx->base.pc_next);
+ }
l1 = gen_new_label();
if ((bo & 0x4) == 0) {
/* Decrement and test CTR */
@@ -3857,7 +3904,7 @@ static void gen_bctar(DisasContext *ctx)
/*** Condition register logical ***/
#define GEN_CRLOGIC(name, tcg_op, opc) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
uint8_t bitmask; \
int sh; \
@@ -3918,7 +3965,8 @@ static void gen_rfi(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
- /* This instruction doesn't exist anymore on 64-bit server
+ /*
+ * This instruction doesn't exist anymore on 64-bit server
* processors compliant with arch 2.x
*/
if (is_book3s_arch2x(ctx)) {
@@ -4157,7 +4205,7 @@ static void gen_mfcr(DisasContext *ctx)
if (likely(ctx->opcode & 0x00100000)) {
crm = CRM(ctx->opcode);
if (likely(crm && ((crm & (crm - 1)) == 0))) {
- crn = ctz32 (crm);
+ crn = ctz32(crm);
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
cpu_gpr[rD(ctx->opcode)], crn * 4);
@@ -4222,7 +4270,8 @@ static inline void gen_op_mfspr(DisasContext *ctx)
(*read_cb)(ctx, rD(ctx->opcode), sprn);
} else {
/* Privilege exception */
- /* This is a hack to avoid warnings when running Linux:
+ /*
+ * This is a hack to avoid warnings when running Linux:
* this OS breaks the PowerPC virtualisation model,
* allowing userland application to read the PVR
*/
@@ -4245,8 +4294,9 @@ static inline void gen_op_mfspr(DisasContext *ctx)
"Trying to read invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
- /* The behaviour depends on MSR:PR and SPR# bit 0x10,
- * it can generate a priv, a hv emu or a no-op
+ /*
+ * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
+ * generate a priv, a hv emu or a no-op
*/
if (sprn & 0x10) {
if (ctx->pr) {
@@ -4280,7 +4330,7 @@ static void gen_mtcrf(DisasContext *ctx)
if (likely((ctx->opcode & 0x00100000))) {
if (crm && ((crm & (crm - 1)) == 0)) {
TCGv_i32 temp = tcg_temp_new_i32();
- crn = ctz32 (crm);
+ crn = ctz32(crm);
tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
tcg_gen_shri_i32(temp, temp, crn * 4);
tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
@@ -4309,14 +4359,17 @@ static void gen_mtmsrd(DisasContext *ctx)
if (ctx->opcode & 0x00010000) {
/* Special form that does not need any synchronisation */
TCGv t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
+ (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr,
+ ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
tcg_temp_free(t0);
} else {
- /* XXX: we need to update nip before the store
- * if we enter power saving mode, we will exit the loop
- * directly from ppc_store_msr
+ /*
+ * XXX: we need to update nip before the store; if we enter
+ * power saving mode, we will exit the loop directly from
+ * ppc_store_msr
*/
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
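
The special form above touches only MSR[RI] and MSR[EE], so no synchronisation is required. A minimal sketch of that masked update (bit numbers passed in, since MSR_RI/MSR_EE are QEMU constants):

#include <stdint.h>

/* Sketch: copy just the RI and EE bits from rS into MSR */
static uint64_t msr_set_ri_ee(uint64_t msr, uint64_t rs, int ri, int ee)
{
    uint64_t mask = (1ULL << ri) | (1ULL << ee);

    return (msr & ~mask) | (rs & mask);
}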
@@ -4342,16 +4395,19 @@ static void gen_mtmsr(DisasContext *ctx)
if (ctx->opcode & 0x00010000) {
/* Special form that does not need any synchronisation */
TCGv t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
+ (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr,
+ ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
tcg_temp_free(t0);
} else {
TCGv msr = tcg_temp_new();
- /* XXX: we need to update nip before the store
- * if we enter power saving mode, we will exit the loop
- * directly from ppc_store_msr
+ /*
+ * XXX: we need to update nip before the store; if we enter
+ * power saving mode, we will exit the loop directly from
+ * ppc_store_msr
*/
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
@@ -4415,8 +4471,9 @@ static void gen_mtspr(DisasContext *ctx)
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
- /* The behaviour depends on MSR:PR and SPR# bit 0x10,
- * it can generate a priv, a hv emu or a no-op
+ /*
+ * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
+ * generate a priv, a hv emu or a no-op
*/
if (sprn & 0x10) {
if (ctx->pr) {
@@ -4526,36 +4583,40 @@ static void gen_dcbstep(DisasContext *ctx)
/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
@@ -4653,8 +4714,9 @@ static void gen_icbiep(DisasContext *ctx)
/* dcba */
static void gen_dcba(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a store by the MMU
+ /*
+ * interpreted as no-op
+ * XXX: specification say this is treated as a store by the MMU
* but does not generate any exception
*/
}
@@ -5021,8 +5083,9 @@ static void gen_abs(DisasContext *ctx)
gen_set_label(l1);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* abso - abso. */
@@ -5044,8 +5107,9 @@ static void gen_abso(DisasContext *ctx)
gen_set_label(l2);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
gen_set_label(l3);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* clcs */
@@ -5062,8 +5126,9 @@ static void gen_div(DisasContext *ctx)
{
gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* divo - divo. */
@@ -5071,8 +5136,9 @@ static void gen_divo(DisasContext *ctx)
{
gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* divs - divs. */
@@ -5080,8 +5146,9 @@ static void gen_divs(DisasContext *ctx)
{
gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* divso - divso. */
@@ -5089,8 +5156,9 @@ static void gen_divso(DisasContext *ctx)
{
gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env,
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* doz - doz. */
@@ -5098,14 +5166,17 @@ static void gen_doz(DisasContext *ctx)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
- tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)]);
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* dozo - dozo. */
@@ -5118,7 +5189,8 @@ static void gen_dozo(DisasContext *ctx)
TCGv t2 = tcg_temp_new();
/* Start with XER OV disabled, the most likely case */
tcg_gen_movi_tl(cpu_ov, 0);
- tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], l1);
tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
@@ -5134,8 +5206,9 @@ static void gen_dozo(DisasContext *ctx)
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* dozi */
@@ -5150,8 +5223,9 @@ static void gen_dozi(DisasContext *ctx)
gen_set_label(l1);
tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* lscbx - lscbx. */
@@ -5169,8 +5243,9 @@ static void gen_lscbx(DisasContext *ctx)
tcg_temp_free_i32(t3);
tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, t0);
+ }
tcg_temp_free(t0);
}
@@ -5196,8 +5271,9 @@ static void gen_maskg(DisasContext *ctx)
tcg_temp_free(t1);
tcg_temp_free(t2);
tcg_temp_free(t3);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* maskir - maskir. */
@@ -5210,8 +5286,9 @@ static void gen_maskir(DisasContext *ctx)
tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* mul - mul. */
@@ -5230,8 +5307,9 @@ static void gen_mul(DisasContext *ctx)
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* mulo - mulo. */
@@ -5258,8 +5336,9 @@ static void gen_mulo(DisasContext *ctx)
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* nabs - nabs. */
@@ -5273,8 +5352,9 @@ static void gen_nabs(DisasContext *ctx)
gen_set_label(l1);
tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* nabso - nabso. */
@@ -5290,8 +5370,9 @@ static void gen_nabso(DisasContext *ctx)
gen_set_label(l2);
/* nabs never overflows */
tcg_gen_movi_tl(cpu_ov, 0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
}
/* rlmi - rlmi. */
@@ -5303,11 +5384,13 @@ static void gen_rlmi(DisasContext *ctx)
tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
tcg_gen_andi_tl(t0, t0, MASK(mb, me));
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ ~MASK(mb, me));
tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* rrib - rrib. */
@@ -5324,8 +5407,9 @@ static void gen_rrib(DisasContext *ctx)
tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sle - sle. */
@@ -5342,8 +5426,9 @@ static void gen_sle(DisasContext *ctx)
gen_store_spr(SPR_MQ, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sleq - sleq. */
@@ -5364,8 +5449,9 @@ static void gen_sleq(DisasContext *ctx)
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sliq - sliq. */
@@ -5381,8 +5467,9 @@ static void gen_sliq(DisasContext *ctx)
gen_store_spr(SPR_MQ, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* slliq - slliq. */
@@ -5399,8 +5486,9 @@ static void gen_slliq(DisasContext *ctx)
tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sllq - sllq. */
@@ -5428,8 +5516,9 @@ static void gen_sllq(DisasContext *ctx)
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* slq - slq. */
@@ -5451,8 +5540,9 @@ static void gen_slq(DisasContext *ctx)
gen_set_label(l1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sraiq - sraiq. */
@@ -5474,8 +5564,9 @@ static void gen_sraiq(DisasContext *ctx)
tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sraq - sraq. */
@@ -5507,8 +5598,9 @@ static void gen_sraq(DisasContext *ctx)
gen_set_label(l2);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sre - sre. */
@@ -5525,8 +5617,9 @@ static void gen_sre(DisasContext *ctx)
gen_store_spr(SPR_MQ, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srea - srea. */
@@ -5540,8 +5633,9 @@ static void gen_srea(DisasContext *ctx)
tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sreq */
@@ -5562,8 +5656,9 @@ static void gen_sreq(DisasContext *ctx)
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* sriq */
@@ -5579,8 +5674,9 @@ static void gen_sriq(DisasContext *ctx)
gen_store_spr(SPR_MQ, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srliq */
@@ -5597,8 +5693,9 @@ static void gen_srliq(DisasContext *ctx)
tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srlq */
@@ -5627,8 +5724,9 @@ static void gen_srlq(DisasContext *ctx)
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* srq */
@@ -5650,8 +5748,9 @@ static void gen_srq(DisasContext *ctx)
gen_set_label(l1);
tcg_temp_free(t0);
tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0))
+ if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
}
/* PowerPC 602 specific instructions */
@@ -5769,8 +5868,9 @@ static void gen_mfsri(DisasContext *ctx)
tcg_gen_extract_tl(t0, t0, 28, 4);
gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0);
tcg_temp_free(t0);
- if (ra != 0 && ra != rd)
+ if (ra != 0 && ra != rd) {
tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
+ }
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6147,9 +6247,10 @@ static void gen_dcread(DisasContext *ctx)
/* icbt */
static void gen_icbt_40x(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
@@ -6440,7 +6541,7 @@ static void gen_tlbilx_booke206(DisasContext *ctx)
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- switch((ctx->opcode >> 21) & 0x3) {
+ switch ((ctx->opcode >> 21) & 0x3) {
case 0:
gen_helper_booke206_tlbilx0(cpu_env, t0);
break;
@@ -6474,8 +6575,9 @@ static void gen_wrtee(DisasContext *ctx)
tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
tcg_temp_free(t0);
- /* Stop translation to have a chance to raise an exception
- * if we just set msr_ee to 1
+ /*
+ * Stop translation to have a chance to raise an exception if we
+ * just set msr_ee to 1
*/
gen_stop_exception(ctx);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -6529,9 +6631,10 @@ static void gen_msync_4xx(DisasContext *ctx)
/* icbt */
static void gen_icbt_440(DisasContext *ctx)
{
- /* interpreted as no-op */
- /* XXX: specification say this is treated as a load by the MMU
- * but does not generate any exception
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU but
+ * does not generate any exception
*/
}
@@ -6625,7 +6728,8 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
return; \
} \
- /* Because tbegin always fails in QEMU, these user \
+ /* \
+ * Because tbegin always fails in QEMU, these user \
* space instructions all have a simple implementation: \
* \
* CR[0] = 0b0 || MSR[TS] || 0b0 \
@@ -6641,17 +6745,18 @@ GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
+
static inline void gen_cp_abort(DisasContext *ctx)
{
- // Do Nothing
+ /* Do Nothing */
}
#define GEN_CP_PASTE_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- /* Generate invalid exception until \
- * we have an implementation of the copy \
- * paste facility \
+ /* \
+ * Generate invalid exception until we have an \
+ * implementation of the copy paste facility \
*/ \
gen_invalid(ctx); \
}
@@ -6665,8 +6770,9 @@ static void gen_tcheck(DisasContext *ctx)
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
return;
}
- /* Because tbegin always fails, the tcheck implementation
- * is simple:
+ /*
+ * Because tbegin always fails, the tcheck implementation is
+ * simple:
*
* CR[CRF] = TDOOMED || MSR[TS] || 0b0
* = 0b1 || 0b00 || 0b0
@@ -6678,7 +6784,7 @@ static void gen_tcheck(DisasContext *ctx)
#define GEN_TM_PRIV_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \
}
#else
@@ -6691,7 +6797,8 @@ static inline void gen_##name(DisasContext *ctx) \
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
return; \
} \
- /* Because tbegin always fails, the implementation is \
+ /* \
+ * Because tbegin always fails, the implementation is \
* simple: \
* \
* CR[0] = 0b0 || MSR[TS] || 0b0 \
@@ -6973,8 +7080,10 @@ GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B),
GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
-/* XXX Those instructions will need to be handled differently for
- * different ISA versions */
+/*
+ * XXX Those instructions will need to be handled differently for
+ * different ISA versions
+ */
GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300),
@@ -7444,11 +7553,13 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
);
#endif
for (i = 0; i < 32; i++) {
- if ((i & (RGPL - 1)) == 0)
+ if ((i & (RGPL - 1)) == 0) {
qemu_fprintf(f, "GPR%02d", i);
+ }
qemu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i));
- if ((i & (RGPL - 1)) == (RGPL - 1))
+ if ((i & (RGPL - 1)) == (RGPL - 1)) {
qemu_fprintf(f, "\n");
+ }
}
qemu_fprintf(f, "CR ");
for (i = 0; i < 8; i++)
@@ -7456,12 +7567,13 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, " [");
for (i = 0; i < 8; i++) {
char a = '-';
- if (env->crf[i] & 0x08)
+ if (env->crf[i] & 0x08) {
a = 'L';
- else if (env->crf[i] & 0x04)
+ } else if (env->crf[i] & 0x04) {
a = 'G';
- else if (env->crf[i] & 0x02)
+ } else if (env->crf[i] & 0x02) {
a = 'E';
+ }
qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
}
qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
@@ -7543,8 +7655,9 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
#endif
- if (env->spr_cb[SPR_LPCR].name)
+ if (env->spr_cb[SPR_LPCR].name) {
qemu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
+ }
switch (env->mmu_model) {
case POWERPC_MMU_32B:
@@ -7610,8 +7723,9 @@ void ppc_cpu_dump_statistics(CPUState *cs, int flags)
t3 = ind_table(handler);
for (op3 = 0; op3 < 32; op3++) {
handler = t3[op3];
- if (handler->count == 0)
+ if (handler->count == 0) {
continue;
+ }
qemu_printf("%02x %02x %02x (%02x %04d) %16s: "
"%016" PRIx64 " %" PRId64 "\n",
op1, op2, op3, op1, (op3 << 5) | op2,
@@ -7619,8 +7733,9 @@ void ppc_cpu_dump_statistics(CPUState *cs, int flags)
handler->count, handler->count);
}
} else {
- if (handler->count == 0)
+ if (handler->count == 0) {
continue;
+ }
qemu_printf("%02x %02x (%02x %04d) %16s: "
"%016" PRIx64 " %" PRId64 "\n",
op1, op2, op1, op2, handler->oname,
@@ -7628,8 +7743,9 @@ void ppc_cpu_dump_statistics(CPUState *cs, int flags)
}
}
} else {
- if (handler->count == 0)
+ if (handler->count == 0) {
continue;
+ }
qemu_printf("%02x (%02x ) %16s: %016" PRIx64
" %" PRId64 "\n",
op1, op1, handler->oname,
@@ -7669,14 +7785,16 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|| (env->mmu_model & POWERPC_MMU_64B);
ctx->fpu_enabled = !!msr_fp;
- if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
+ if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) {
ctx->spe_enabled = !!msr_spe;
- else
+ } else {
ctx->spe_enabled = false;
- if ((env->flags & POWERPC_FLAG_VRE) && msr_vr)
+ }
+ if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) {
ctx->altivec_enabled = !!msr_vr;
- else
+ } else {
ctx->altivec_enabled = false;
+ }
if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) {
ctx->vsx_enabled = !!msr_vsx;
} else {
@@ -7690,12 +7808,14 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
}
#endif
ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE);
- if ((env->flags & POWERPC_FLAG_SE) && msr_se)
+ if ((env->flags & POWERPC_FLAG_SE) && msr_se) {
ctx->singlestep_enabled = CPU_SINGLE_STEP;
- else
+ } else {
ctx->singlestep_enabled = 0;
- if ((env->flags & POWERPC_FLAG_BE) && msr_be)
+ }
+ if ((env->flags & POWERPC_FLAG_BE) && msr_be) {
ctx->singlestep_enabled |= CPU_BRANCH_STEP;
+ }
if ((env->flags & POWERPC_FLAG_DE) && msr_de) {
ctx->singlestep_enabled = 0;
target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
@@ -7710,7 +7830,7 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
if (unlikely(ctx->base.singlestep_enabled)) {
ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
}
-#if defined (DO_SINGLE_STEP) && 0
+#if defined(DO_SINGLE_STEP) && 0
/* Single step trace mode */
msr_se = 1;
#endif
@@ -7735,10 +7855,12 @@ static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
gen_debug_exception(ctx);
dcbase->is_jmp = DISAS_NORETURN;
- /* The address covered by the breakpoint must be included in
- [tb->pc, tb->pc + tb->size) in order to for it to be
- properly cleared -- thus we increment the PC here so that
- the logic setting tb->size below does the right thing. */
+ /*
+ * The address covered by the breakpoint must be included in
+ * [tb->pc, tb->pc + tb->size) in order for it to be properly
+ * cleared -- thus we increment the PC here so that the logic
+ * setting tb->size below does the right thing.
+ */
ctx->base.pc_next += 4;
return true;
}
diff --git a/target/ppc/translate/fp-impl.inc.c b/target/ppc/translate/fp-impl.inc.c
index 0f21a4e..9dcff94 100644
--- a/target/ppc/translate/fp-impl.inc.c
+++ b/target/ppc/translate/fp-impl.inc.c
@@ -585,11 +585,13 @@ static void gen_mcrfs(DisasContext *ctx)
shift = 4 * nibble;
tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
- tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
+ 0xf);
tcg_temp_free(tmp);
tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
/* Only the exception bits (including FX) should be cleared if read */
- tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
+ ~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
tmask = tcg_const_i32(1 << nibble);
gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
@@ -735,7 +737,7 @@ static void gen_mtfsfi(DisasContext *ctx)
/*** Floating-point load ***/
#define GEN_LDF(name, ldop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -754,7 +756,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_LDUF(name, ldop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -778,7 +780,7 @@ static void glue(gen_, name##u)(DisasContext *ctx)
}
#define GEN_LDUXF(name, ldop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -802,7 +804,7 @@ static void glue(gen_, name##ux)(DisasContext *ctx)
}
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
-static void glue(gen_, name##x)(DisasContext *ctx) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -872,8 +874,10 @@ static void gen_lfdp(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
t0 = tcg_temp_new_i64();
- /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
- necessary 64-bit byteswap already. */
+ /*
+ * We only need to swap high and low halves. gen_qemu_ld64_i64
+ * does the necessary 64-bit byteswap already.
+ */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
@@ -904,8 +908,10 @@ static void gen_lfdpx(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
t0 = tcg_temp_new_i64();
- /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
- necessary 64-bit byteswap already. */
+ /*
+ * We only need to swap high and low halves. gen_qemu_ld64_i64
+ * does the necessary 64-bit byteswap already.
+ */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
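
In little-endian mode the two doublewords of the pair are simply loaded into opposite registers of the pair; the 64-bit load's own byteswap handles everything else. A sketch of that ordering (ld64 stands in for gen_qemu_ld64_i64):

#include <stdint.h>

/* Sketch: lfdp register-pair ordering; fpr[0]/fpr[1] model rD/rD+1 */
static void lfdp_pair(uint64_t fpr[2], const uint64_t *ea, int le_mode,
                      uint64_t (*ld64)(const uint64_t *))
{
    if (le_mode) {
        fpr[1] = ld64(ea);
        fpr[0] = ld64(ea + 1);
    } else {
        fpr[0] = ld64(ea);
        fpr[1] = ld64(ea + 1);
    }
}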
@@ -966,7 +972,7 @@ static void gen_lfiwzx(DisasContext *ctx)
}
/*** Floating-point store ***/
#define GEN_STF(name, stop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -985,7 +991,7 @@ static void glue(gen_, name)(DisasContext *ctx)
}
#define GEN_STUF(name, stop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -1009,7 +1015,7 @@ static void glue(gen_, name##u)(DisasContext *ctx)
}
#define GEN_STUXF(name, stop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -1033,7 +1039,7 @@ static void glue(gen_, name##ux)(DisasContext *ctx)
}
#define GEN_STXF(name, stop, opc2, opc3, type) \
-static void glue(gen_, name##x)(DisasContext *ctx) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 t0; \
@@ -1103,8 +1109,10 @@ static void gen_stfdp(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_st64_i64 does
- necessary 64-bit byteswap already. */
+ /*
+ * We only need to swap high and low halves. gen_qemu_st64_i64
+ * does the necessary 64-bit byteswap already.
+ */
if (unlikely(ctx->le_mode)) {
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
@@ -1135,8 +1143,10 @@ static void gen_stfdpx(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_st64_i64 does
- necessary 64-bit byteswap already. */
+ /*
+ * We only need to swap high and low halves. gen_qemu_st64_i64
+ * does the necessary 64-bit byteswap already.
+ */
if (unlikely(ctx->le_mode)) {
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
@@ -1204,8 +1214,9 @@ static void gen_lfqu(DisasContext *ctx)
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64_i64(ctx, t2, t1);
set_fpr((rd + 1) % 32, t2);
- if (ra != 0)
+ if (ra != 0) {
tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free_i64(t2);
@@ -1229,8 +1240,9 @@ static void gen_lfqux(DisasContext *ctx)
gen_qemu_ld64_i64(ctx, t2, t1);
set_fpr((rd + 1) % 32, t2);
tcg_temp_free(t1);
- if (ra != 0)
+ if (ra != 0) {
tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
tcg_temp_free(t0);
tcg_temp_free_i64(t2);
}
diff --git a/target/ppc/translate/spe-impl.inc.c b/target/ppc/translate/spe-impl.inc.c
index 8c1c16c..7ab0a29 100644
--- a/target/ppc/translate/spe-impl.inc.c
+++ b/target/ppc/translate/spe-impl.inc.c
@@ -18,7 +18,8 @@ static inline void gen_evmra(DisasContext *ctx)
TCGv_i64 tmp = tcg_temp_new_i64();
/* tmp := rA_lo + rA_hi << 32 */
- tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+ tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)],
+ cpu_gprh[rA(ctx->opcode)]);
/* spe_acc := tmp */
tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
@@ -780,7 +781,7 @@ static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
}
#define GEN_SPEOP_LDST(name, opc2, sh) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv t0; \
if (unlikely(!ctx->spe_enabled)) { \
@@ -1089,7 +1090,8 @@ static inline void gen_efsabs(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
- tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ (target_long)~0x80000000LL);
}
static inline void gen_efsnabs(DisasContext *ctx)
{
@@ -1097,7 +1099,8 @@ static inline void gen_efsnabs(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
- tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
}
static inline void gen_efsneg(DisasContext *ctx)
{
@@ -1105,7 +1108,8 @@ static inline void gen_efsneg(DisasContext *ctx)
gen_exception(ctx, POWERPC_EXCP_SPEU);
return;
}
- tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
}
/* Conversion */
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index eb10c53..bd3ff40 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -15,7 +15,7 @@ static inline TCGv_ptr gen_avr_ptr(int reg)
}
#define GEN_VR_LDX(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv EA; \
TCGv_i64 avr; \
@@ -28,8 +28,10 @@ static void glue(gen_, name)(DisasContext *ctx)
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
- necessary 64-bit byteswap already. */ \
+ /* \
+ * We only need to swap high and low halves. gen_qemu_ld64_i64 \
+ * does the necessary 64-bit byteswap already. \
+ */ \
if (ctx->le_mode) { \
gen_qemu_ld64_i64(ctx, avr, EA); \
set_avr64(rD(ctx->opcode), avr, false); \
@@ -61,8 +63,10 @@ static void gen_st##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
- necessary 64-bit byteswap already. */ \
+ /* \
+ * We only need to swap high and low halves. gen_qemu_st64_i64 \
+ * does the necessary 64-bit byteswap already. \
+ */ \
if (ctx->le_mode) { \
get_avr64(avr, rD(ctx->opcode), false); \
gen_qemu_st64_i64(ctx, avr, EA); \
@@ -296,7 +300,7 @@ GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);
#define GEN_VXFORM(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr ra, rb, rd; \
if (unlikely(!ctx->altivec_enabled)) { \
@@ -306,7 +310,7 @@ static void glue(gen_, name)(DisasContext *ctx)
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, ra, rb); \
+ gen_helper_##name(rd, ra, rb); \
tcg_temp_free_ptr(ra); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(rd); \
@@ -758,7 +762,7 @@ GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13);
GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14);
#define GEN_VXFORM_NOA(name, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr rb, rd; \
if (unlikely(!ctx->altivec_enabled)) { \
@@ -767,9 +771,9 @@ static void glue(gen_, name)(DisasContext *ctx)
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name (rd, rb); \
+ gen_helper_##name(rd, rb); \
tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
+ tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
@@ -943,7 +947,7 @@ static void gen_vsldoi(DisasContext *ctx)
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
sh = tcg_const_i32(VSH(ctx->opcode));
- gen_helper_vsldoi (rd, ra, rb, sh);
+ gen_helper_vsldoi(rd, ra, rb, sh);
tcg_temp_free_ptr(ra);
tcg_temp_free_ptr(rb);
tcg_temp_free_ptr(rd);
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index 489b243..11d9b75 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -751,7 +751,7 @@ static void gen_xxpermdi(DisasContext *ctx)
#define SGN_MASK_SP 0x8000000080000000ull
#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext * ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 xb, sgm; \
if (unlikely(!ctx->vsx_enabled)) { \
@@ -848,7 +848,7 @@ VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext * ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 xbh, xbl, sgm; \
if (unlikely(!ctx->vsx_enabled)) { \
@@ -910,7 +910,7 @@ VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext * ctx) \
+static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 opc; \
if (unlikely(!ctx->vsx_enabled)) { \
@@ -923,7 +923,7 @@ static void gen_##name(DisasContext * ctx) \
}
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
-static void gen_##name(DisasContext * ctx) \
+static void gen_##name(DisasContext *ctx) \
{ \
TCGv_i64 t0; \
TCGv_i64 t1; \
@@ -1230,7 +1230,7 @@ static void gen_xxbrw(DisasContext *ctx)
}
#define VSX_LOGICAL(name, vece, tcg_op) \
-static void glue(gen_, name)(DisasContext * ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
@@ -1251,7 +1251,7 @@ VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
#define VSX_XXMRG(name, high) \
-static void glue(gen_, name)(DisasContext * ctx) \
+static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_i64 a0, a1, b0, b1, tmp; \
if (unlikely(!ctx->vsx_enabled)) { \
@@ -1444,7 +1444,8 @@ static void gen_##name(DisasContext *ctx) \
xb = tcg_const_tl(xB(ctx->opcode)); \
t0 = tcg_temp_new_i32(); \
t1 = tcg_temp_new_i64(); \
- /* uimm > 15 out of bound and for \
+ /* \
+ * uimm > 15 is out of bounds, and for \
* uimm > 12 handle as per hardware in helper \
*/ \
if (uimm > 15) { \
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index 20a64f3..0394a9d 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -41,12 +41,13 @@
#include "fpu/softfloat.h"
#include "qapi/qapi-commands-target.h"
-//#define PPC_DUMP_CPU
-//#define PPC_DEBUG_SPR
-//#define PPC_DUMP_SPR_ACCESSES
+/* #define PPC_DUMP_CPU */
+/* #define PPC_DEBUG_SPR */
+/* #define PPC_DUMP_SPR_ACCESSES */
/* #define USE_APPLE_GDB */
-/* Generic callbacks:
+/*
+ * Generic callbacks:
* do nothing but store/retrieve spr value
*/
static void spr_load_dump_spr(int sprn)
@@ -58,7 +59,7 @@ static void spr_load_dump_spr(int sprn)
#endif
}
-static void spr_read_generic (DisasContext *ctx, int gprn, int sprn)
+static void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
gen_load_spr(cpu_gpr[gprn], sprn);
spr_load_dump_spr(sprn);
@@ -230,13 +231,13 @@ static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
}
}
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
@@ -267,20 +268,20 @@ static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
}
}
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
-__attribute__ (( unused ))
+ATTRIBUTE_UNUSED
static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
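The ATTRIBUTE_UNUSED hunks above swap raw GCC attribute syntax for QEMU's portable macro. Such a macro is conventionally defined along the following lines (a sketch; QEMU's actual definition lives in its compiler headers and may differ in detail):

#ifdef __GNUC__
#define ATTRIBUTE_UNUSED __attribute__((unused))
#else
#define ATTRIBUTE_UNUSED
#endif

/* Keeps -Wunused-function quiet for helpers that are only referenced
 * under some build configurations. */
ATTRIBUTE_UNUSED static int helper_only_sometimes_used(void) { return 0; }
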
@@ -319,12 +320,16 @@ static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
/* IBAT0L...IBAT7L */
static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}
static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
@@ -359,12 +364,16 @@ static void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
/* DBAT0L...DBAT7L */
static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}
static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}
static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
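The offsetof() expressions being re-wrapped above all decode the same SPR layout: the upper and lower halves of each BAT pair occupy adjacent SPR numbers, so the parity of sprn selects upper/lower and the halved offset selects the pair. A sketch, assuming the classic 32-bit PowerPC numbering where IBAT0U is SPR 528:

#define SPR_IBAT0U 528  /* IBAT0U..IBAT3L occupy SPRs 528..535 */

static void ibat_index(int sprn, int *upper_lower, int *pair)
{
    *upper_lower = sprn & 1;                 /* 0 = IBATnU, 1 = IBATnL */
    *pair        = (sprn - SPR_IBAT0U) / 2;  /* n = 0..3 */
}
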
@@ -473,7 +482,9 @@ static void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn)
@@ -532,7 +543,8 @@ static void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
}
static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn)
@@ -661,14 +673,20 @@ static inline void vscr_init(CPUPPCState *env, uint32_t val)
static inline void _spr_register(CPUPPCState *env, int num,
const char *name,
- void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
- void (*uea_write)(DisasContext *ctx, int sprn, int gprn),
+ void (*uea_read)(DisasContext *ctx,
+ int gprn, int sprn),
+ void (*uea_write)(DisasContext *ctx,
+ int sprn, int gprn),
#if !defined(CONFIG_USER_ONLY)
- void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
- void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
- void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
- void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
+ void (*oea_read)(DisasContext *ctx,
+ int gprn, int sprn),
+ void (*oea_write)(DisasContext *ctx,
+ int sprn, int gprn),
+ void (*hea_read)(DisasContext *opaque,
+ int gprn, int sprn),
+ void (*hea_write)(DisasContext *opaque,
+ int sprn, int gprn),
#endif
#if defined(CONFIG_KVM)
uint64_t one_reg_id,
@@ -678,7 +696,7 @@ static inline void _spr_register(CPUPPCState *env, int num,
ppc_spr_t *spr;
spr = &env->spr_cb[num];
- if (spr->name != NULL ||env-> spr[num] != 0x00000000 ||
+ if (spr->name != NULL || env->spr[num] != 0x00000000 ||
#if !defined(CONFIG_USER_ONLY)
spr->oea_read != NULL || spr->oea_write != NULL ||
#endif
@@ -774,8 +792,10 @@ static void gen_spr_sdr1(CPUPPCState *env)
{
#ifndef CONFIG_USER_ONLY
if (env->has_hv_mode) {
- /* SDR1 is a hypervisor resource on CPUs which have a
- * hypervisor mode */
+ /*
+ * SDR1 is a hypervisor resource on CPUs which have a
+ * hypervisor mode
+ */
spr_register_hv(env, SPR_SDR1, "SDR1",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
@@ -1123,7 +1143,8 @@ static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
- /* Note, the HV=1 PR=0 case is handled earlier by simply using
+ /*
+ * Note, the HV=1 PR=0 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@@ -1157,7 +1178,8 @@ static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
- /* Note, the HV=1 case is handled earlier by simply using
+ /*
+ * Note, the HV=1 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@@ -1187,7 +1209,8 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
TCGv t1 = tcg_temp_new();
TCGv t2 = tcg_temp_new();
- /* Note, the HV=1 case is handled earlier by simply using
+ /*
+ * Note, the HV=1 case is handled earlier by simply using
* spr_write_generic for HV mode in the SPR table
*/
@@ -1215,10 +1238,13 @@ static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
static void gen_spr_amr(CPUPPCState *env)
{
#ifndef CONFIG_USER_ONLY
- /* Virtual Page Class Key protection */
- /* The AMR is accessible either via SPR 13 or SPR 29. 13 is
+ /*
+ * Virtual Page Class Key protection
+ *
+ * The AMR is accessible either via SPR 13 or SPR 29. 13 is
* userspace accessible, 29 is privileged. So we only need to set
- * the kvm ONE_REG id on one of them, we use 29 */
+ * the kvm ONE_REG id on one of them, we use 29
+ */
spr_register(env, SPR_UAMR, "UAMR",
&spr_read_generic, &spr_write_amr,
&spr_read_generic, &spr_write_amr,
@@ -1902,7 +1928,8 @@ static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask,
/* TLB assist registers */
/* XXX : not implemented */
for (i = 0; i < 8; i++) {
- void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = &spr_write_generic32;
+ void (*uea_write)(DisasContext *ctx, int sprn, int gprn) =
+ &spr_write_generic32;
if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) {
uea_write = &spr_write_generic;
}
@@ -2798,7 +2825,6 @@ static void gen_spr_8xx(CPUPPCState *env)
0x00000000);
}
-// XXX: TODO
/*
* AMR => SPR 29 (Power 2.04)
* CTRL => SPR 136 (Power 2.04)
@@ -3344,16 +3370,18 @@ static int check_pow_nocheck(CPUPPCState *env)
static int check_pow_hid0(CPUPPCState *env)
{
- if (env->spr[SPR_HID0] & 0x00E00000)
+ if (env->spr[SPR_HID0] & 0x00E00000) {
return 1;
+ }
return 0;
}
static int check_pow_hid0_74xx(CPUPPCState *env)
{
- if (env->spr[SPR_HID0] & 0x00600000)
+ if (env->spr[SPR_HID0] & 0x00600000) {
return 1;
+ }
return 0;
}
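For context on the two magic masks: on 750-class parts HID0 carries the power-management enables DOZE, NAP and SLEEP, and the 74xx drops DOZE, which is where 0x00E00000 and 0x00600000 come from. A sketch with the bits named (bit assignments per the processor user manuals, not QEMU's own definitions):

#include <stdint.h>

#define HID0_DOZE  0x00800000u
#define HID0_NAP   0x00400000u
#define HID0_SLEEP 0x00200000u

static int hid0_pow_enabled(uint32_t hid0)
{
    /* DOZE | NAP | SLEEP == 0x00E00000, as tested in check_pow_hid0() */
    return (hid0 & (HID0_DOZE | HID0_NAP | HID0_SLEEP)) != 0;
}
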
@@ -4602,7 +4630,8 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
dc->desc = "e200 core";
pcc->init_proc = init_proc_e200;
pcc->check_pow = check_pow_hid0;
- /* XXX: unimplemented instructions:
+ /*
+ * XXX: unimplemented instructions:
* dcblc
* dcbtlst
* dcbtstls
@@ -4797,18 +4826,18 @@ static void init_proc_e500(CPUPPCState *env, int version)
* gen_spr_BookE(env, 0x0000000F0000FD7FULL);
*/
switch (version) {
- case fsl_e500v1:
- case fsl_e500v2:
- default:
- ivor_mask = 0x0000000F0000FFFFULL;
- break;
- case fsl_e500mc:
- case fsl_e5500:
- ivor_mask = 0x000003FE0000FFFFULL;
- break;
- case fsl_e6500:
- ivor_mask = 0x000003FF0000FFFFULL;
- break;
+ case fsl_e500v1:
+ case fsl_e500v2:
+ default:
+ ivor_mask = 0x0000000F0000FFFFULL;
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ ivor_mask = 0x000003FE0000FFFFULL;
+ break;
+ case fsl_e6500:
+ ivor_mask = 0x000003FF0000FFFFULL;
+ break;
}
gen_spr_BookE(env, ivor_mask);
gen_spr_usprg3(env);
@@ -4848,7 +4877,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
tlbncfg[1] = 0x40028040;
break;
default:
- cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
+ env->spr[SPR_PVR]);
}
#endif
/* Cache sizes */
@@ -4872,7 +4902,8 @@ static void init_proc_e500(CPUPPCState *env, int version)
l1cfg1 |= 0x0B83820;
break;
default:
- cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n",
+ env->spr[SPR_PVR]);
}
gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg);
/* XXX : not implemented */
@@ -5252,7 +5283,8 @@ static void init_proc_601(CPUPPCState *env)
0x00000000);
/* Memory management */
init_excp_601(env);
- /* XXX: beware that dcache line size is 64
+ /*
+ * XXX: beware that dcache line size is 64
* but dcbz uses 32 bytes "sectors"
* XXX: this breaks clcs instruction !
*/
@@ -5789,7 +5821,8 @@ static void init_proc_750(CPUPPCState *env)
0x00000000);
/* Memory management */
gen_low_BATs(env);
- /* XXX: high BATs are also present but are known to be bugged on
+ /*
+ * XXX: high BATs are also present but are known to be bugged on
* die version 1.x
*/
init_excp_7x0(env);
@@ -5971,7 +6004,8 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
dc->desc = "PowerPC 750 CL";
pcc->init_proc = init_proc_750cl;
pcc->check_pow = check_pow_hid0;
- /* XXX: not implemented:
+ /*
+ * XXX: not implemented:
* cache lock instructions:
* dcbz_l
* floating point paired instructions
@@ -7569,8 +7603,10 @@ static void gen_spr_book3s_altivec(CPUPPCState *env)
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_VRSAVE, 0x00000000);
- /* Can't find information on what this should be on reset. This
- * value is the one used by 74xx processors. */
+ /*
+ * Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors.
+ */
vscr_init(env, 0x00010000);
}
@@ -8975,8 +9011,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)
env->irq_inputs = NULL;
/* Set all exception vectors to an invalid address */
- for (i = 0; i < POWERPC_EXCP_NB; i++)
+ for (i = 0; i < POWERPC_EXCP_NB; i++) {
env->excp_vectors[i] = (target_ulong)(-1ULL);
+ }
env->ivor_mask = 0x00000000;
env->ivpr_mask = 0x00000000;
/* Default MMU definitions */
@@ -9108,8 +9145,9 @@ static void init_ppc_proc(PowerPCCPU *cpu)
#if !defined(CONFIG_USER_ONLY)
if (env->nb_tlb != 0) {
int nb_tlb = env->nb_tlb;
- if (env->id_tlbs != 0)
+ if (env->id_tlbs != 0) {
nb_tlb *= 2;
+ }
switch (env->tlb_type) {
case TLB_6XX:
env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb);
@@ -9201,8 +9239,9 @@ static void fill_new_table(opc_handler_t **table, int len)
{
int i;
- for (i = 0; i < len; i++)
+ for (i = 0; i < len; i++) {
table[i] = &invalid_handler;
+ }
}
static int create_new_table(opc_handler_t **table, unsigned char idx)
@@ -9219,8 +9258,9 @@ static int create_new_table(opc_handler_t **table, unsigned char idx)
static int insert_in_table(opc_handler_t **table, unsigned char idx,
opc_handler_t *handler)
{
- if (table[idx] != &invalid_handler)
+ if (table[idx] != &invalid_handler) {
return -1;
+ }
table[idx] = handler;
return 0;
@@ -9341,17 +9381,20 @@ static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
}
} else {
if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
- insn->opc3, &insn->handler) < 0)
+ insn->opc3, &insn->handler) < 0) {
return -1;
+ }
}
} else {
if (register_ind_insn(ppc_opcodes, insn->opc1,
- insn->opc2, &insn->handler) < 0)
+ insn->opc2, &insn->handler) < 0) {
return -1;
+ }
}
} else {
- if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0)
+ if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
return -1;
+ }
}
return 0;
@@ -9363,8 +9406,9 @@ static int test_opcode_table(opc_handler_t **table, int len)
for (i = 0, count = 0; i < len; i++) {
/* Consistency fixup */
- if (table[i] == NULL)
+ if (table[i] == NULL) {
table[i] = &invalid_handler;
+ }
if (table[i] != &invalid_handler) {
if (is_indirect_opcode(table[i])) {
tmp = test_opcode_table(ind_table(table[i]),
@@ -9386,8 +9430,9 @@ static int test_opcode_table(opc_handler_t **table, int len)
static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
{
- if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0)
+ if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
printf("*** WARNING: no opcode defined !\n");
+ }
}
/*****************************************************************************/
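The fill/insert/test helpers reworked above implement a trie of opcode handler tables: every slot starts out pointing at a shared invalid handler, so the decoder can dispatch without NULL checks, and double registration is caught by comparing against that sentinel. A miniature function-pointer analogue (QEMU's tables actually hold opc_handler_t structures, not bare function pointers):

typedef void (*handler_fn)(void);

static void invalid_handler(void)
{
    /* stand-in for raising an illegal-instruction exception */
}

static void fill_new_table(handler_fn *table, int len)
{
    for (int i = 0; i < len; i++) {
        table[i] = invalid_handler;      /* sentinel, never NULL */
    }
}

static int insert_in_table(handler_fn *table, unsigned idx, handler_fn h)
{
    if (table[idx] != invalid_handler) {
        return -1;                       /* slot already claimed */
    }
    table[idx] = h;
    return 0;
}
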
@@ -9726,14 +9771,15 @@ static int ppc_fixup_cpu(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
- /* TCG doesn't (yet) emulate some groups of instructions that
- * are implemented on some otherwise supported CPUs (e.g. VSX
- * and decimal floating point instructions on POWER7). We
- * remove unsupported instruction groups from the cpu state's
- * instruction masks and hope the guest can cope. For at
- * least the pseries machine, the unavailability of these
- * instructions can be advertised to the guest via the device
- * tree. */
+ /*
+ * TCG doesn't (yet) emulate some groups of instructions that are
+ * implemented on some otherwise supported CPUs (e.g. VSX and
+ * decimal floating point instructions on POWER7). We remove
+ * unsupported instruction groups from the cpu state's instruction
+ * masks and hope the guest can cope. For at least the pseries
+ * machine, the unavailability of these instructions can be
+ * advertised to the guest via the device tree.
+ */
if ((env->insns_flags & ~PPC_TCG_INSNS)
|| (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
warn_report("Disabling some instructions which are not "
@@ -9928,31 +9974,37 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
" Bus model : %s\n",
excp_model, bus_model);
printf(" MSR features :\n");
- if (env->flags & POWERPC_FLAG_SPE)
+ if (env->flags & POWERPC_FLAG_SPE) {
printf(" signal processing engine enable"
"\n");
- else if (env->flags & POWERPC_FLAG_VRE)
+ } else if (env->flags & POWERPC_FLAG_VRE) {
printf(" vector processor enable\n");
- if (env->flags & POWERPC_FLAG_TGPR)
+ }
+ if (env->flags & POWERPC_FLAG_TGPR) {
printf(" temporary GPRs\n");
- else if (env->flags & POWERPC_FLAG_CE)
+ } else if (env->flags & POWERPC_FLAG_CE) {
printf(" critical input enable\n");
- if (env->flags & POWERPC_FLAG_SE)
+ }
+ if (env->flags & POWERPC_FLAG_SE) {
printf(" single-step trace mode\n");
- else if (env->flags & POWERPC_FLAG_DWE)
+ } else if (env->flags & POWERPC_FLAG_DWE) {
printf(" debug wait enable\n");
- else if (env->flags & POWERPC_FLAG_UBLE)
+ } else if (env->flags & POWERPC_FLAG_UBLE) {
printf(" user BTB lock enable\n");
- if (env->flags & POWERPC_FLAG_BE)
+ }
+ if (env->flags & POWERPC_FLAG_BE) {
printf(" branch-step trace mode\n");
- else if (env->flags & POWERPC_FLAG_DE)
+ } else if (env->flags & POWERPC_FLAG_DE) {
printf(" debug interrupt enable\n");
- if (env->flags & POWERPC_FLAG_PX)
+ }
+ if (env->flags & POWERPC_FLAG_PX) {
printf(" inclusive protection\n");
- else if (env->flags & POWERPC_FLAG_PMM)
+ } else if (env->flags & POWERPC_FLAG_PMM) {
printf(" performance monitor mark\n");
- if (env->flags == POWERPC_FLAG_NONE)
+ }
+ if (env->flags == POWERPC_FLAG_NONE) {
printf(" none\n");
+ }
printf(" Time-base/decrementer clock source: %s\n",
env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
dump_ppc_insns(env);
@@ -10094,8 +10146,9 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
const char *p;
unsigned long pvr;
- /* Lookup by PVR if cpu_model is valid 8 digit hex number
- * (excl: 0x prefix if present)
+ /*
+ * Lookup by PVR if cpu_model is valid 8 digit hex number (excl:
+ * 0x prefix if present)
*/
if (!qemu_strtoul(name, &p, 16, &pvr)) {
int len = p - name;
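A sketch of the PVR-by-name rule the comment above describes: the model name counts as a PVR only if it parses fully as exactly 8 hex digits, not counting an optional 0x prefix. (qemu_strtoul's exact contract is glossed over here; standard strtoul stands in for it.)

#include <stdlib.h>
#include <string.h>

static int name_is_pvr(const char *name, unsigned long *pvr)
{
    char *end;
    unsigned long v = strtoul(name, &end, 16);
    int len = (int)(end - name);

    if (len == 0 || *end != '\0') {
        return 0;                    /* not a pure hex number */
    }
    if (strncmp(name, "0x", 2) == 0 || strncmp(name, "0X", 2) == 0) {
        len -= 2;                    /* exclude the prefix from the count */
    }
    if (len != 8) {
        return 0;
    }
    *pvr = v;
    return 1;
}
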
@@ -10439,14 +10492,14 @@ static void ppc_cpu_instance_init(Object *obj)
env->bfd_mach = pcc->bfd_mach;
env->check_pow = pcc->check_pow;
- /* Mark HV mode as supported if the CPU has an MSR_HV bit
- * in the msr_mask. The mask can later be cleared by PAPR
- * mode but the hv mode support will remain, thus enforcing
- * that we cannot use priv. instructions in guest in PAPR
- * mode. For 970 we currently simply don't set HV in msr_mask
- * thus simulating an "Apple mode" 970. If we ever want to
- * support 970 HV mode, we'll have to add a processor attribute
- * of some sort.
+ /*
+ * Mark HV mode as supported if the CPU has an MSR_HV bit in the
+ * msr_mask. The mask can later be cleared by PAPR mode but the hv
+ * mode support will remain, thus enforcing that we cannot use
+ * priv. instructions in guest in PAPR mode. For 970 we currently
+ * simply don't set HV in msr_mask thus simulating an "Apple mode"
+ * 970. If we ever want to support 970 HV mode, we'll have to add
+ * a processor attribute of some sort.
*/
#if !defined(CONFIG_USER_ONLY)
env->has_hv_mode = !!(env->msr_mask & MSR_HVB);
@@ -10573,7 +10626,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->tcg_initialize = ppc_translate_init;
#endif
cc->disas_set_info = ppc_disas_set_info;
-
+
dc->fw_name = "PowerPC,UNKNOWN";
}