author    Peter Maydell <peter.maydell@linaro.org>    2016-02-02 09:13:10 +0000
committer Peter Maydell <peter.maydell@linaro.org>    2016-02-02 09:13:10 +0000
commit    10ae9d76388e3f4a31f6a1475b5e2d1f28404a10 (patch)
tree      46287b1f210d363a995dbab357e7c28c8c5ad14b /target-ppc
parent    0430891ce162b986c6e02a7729a942ecd2a32ca4 (diff)
parent    d1277156b5d3df6d75d138a7eec6ff80934cdcec (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.6-20160201' into staging
ppc patch queue for 2016-02-01

Currently accumulated patches for target-ppc, pseries machine type and
related devices.

* Cleanup of error handling code in spapr
* A number of fixes for Macintosh devices for the benefit of MacOS 9 and X
* Remove some abuses of the RTAS memory access functions in spapr
* Fixes for the gdbstub (and monitor debug) for VMX and VSX extensions
* Fix pseries machine hotplug memory under TCG
* Clean up and extend handling of multiple page sizes with 64-bit hash MMUs
* Fix to the TCG implementation of mcrfs

# gpg: Signature made Mon 01 Feb 2016 02:28:34 GMT using RSA key ID 20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.6-20160201: (40 commits)
  target-ppc: mcrfs should always update FEX/VX and only clear exception bits
  target-ppc: Make every FPSCR_ macro have a corresponding FP_ macro
  target-ppc: Allow more page sizes for POWER7 & POWER8 in TCG
  target-ppc: Helper to determine page size information from hpte alone
  target-ppc: Add new TLB invalidate by HPTE call for hash64 MMUs
  target-ppc: Split 44x tlbiva from ppc_tlb_invalidate_one()
  target-ppc: Remove unused mmu models from ppc_tlb_invalidate_one
  target-ppc: Use actual page size encodings from HPTE
  target-ppc: Rework SLB page size lookup
  target-ppc: Rework ppc_store_slb
  target-ppc: Convert mmu-hash{32,64}.[ch] from CPUPPCState to PowerPCCPU
  target-ppc: Remove unused kvmppc_read_segment_page_sizes() stub
  uninorth.c: add support for UniNorth kMacRISCPCIAddressSelect (0x48) register
  cuda.c: return error for unknown commands
  pseries: Allow TCG h_enter to work with hotplugged memory
  target-ppc: gdbstub: Add VSX support
  target-ppc: gdbstub: fix spe registers for little-endian guests
  target-ppc: gdbstub: fix altivec registers for little-endian guests
  target-ppc: gdbstub: introduce avr_need_swap()
  target-ppc: gdbstub: fix float registers for little-endian guests
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu-models.c       12
-rw-r--r--  target-ppc/cpu-models.h        4
-rw-r--r--  target-ppc/cpu.h              41
-rw-r--r--  target-ppc/gdbstub.c          10
-rw-r--r--  target-ppc/helper.h            1
-rw-r--r--  target-ppc/kvm.c              14
-rw-r--r--  target-ppc/kvm_ppc.h           5
-rw-r--r--  target-ppc/machine.c          22
-rw-r--r--  target-ppc/mmu-hash32.c       68
-rw-r--r--  target-ppc/mmu-hash32.h       30
-rw-r--r--  target-ppc/mmu-hash64.c      270
-rw-r--r--  target-ppc/mmu-hash64.h       30
-rw-r--r--  target-ppc/mmu_helper.c       85
-rw-r--r--  target-ppc/translate.c        23
-rw-r--r--  target-ppc/translate_init.c  129
15 files changed, 496 insertions, 248 deletions
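
Editor's note: the central interface change in this series is the reworked
ppc_store_slb(), which now takes an explicit slot number plus separate ESID
and VSID values instead of a packed RB register image. The minimal sketch
below (not part of the patch; the helper name store_slb_from_rb_rs is
hypothetical) shows how an old-style rb/rs pair maps onto the new signature,
mirroring the call sites in helper_store_slb() and kvm_arch_get_registers()
in the diff that follows.

    /* Sketch only: split an rb/rs pair for the reworked ppc_store_slb()
     * seen in mmu-hash64.c below.  Hypothetical helper, not committed code. */
    static int store_slb_from_rb_rs(PowerPCCPU *cpu, target_ulong rb,
                                    target_ulong rs)
    {
        target_ulong slot = rb & 0xfff;      /* low 12 bits select the SLB slot */
        target_ulong esid = rb & ~0xfffULL;  /* remaining bits carry ESID | SLB_ESID_V */

        /* ppc_store_slb() now validates the slot number, the reserved ESID
         * bits and the VSID page-size encoding, returning -1 on failure. */
        return ppc_store_slb(cpu, slot, esid, rs);
    }
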
diff --git a/target-ppc/cpu-models.c b/target-ppc/cpu-models.c
index 884e31d..ed005d7 100644
--- a/target-ppc/cpu-models.c
+++ b/target-ppc/cpu-models.c
@@ -1139,10 +1139,10 @@
"POWER7 v2.3")
POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
"POWER7+ v2.1")
- POWERPC_DEF("POWER8E_v1.0", CPU_POWERPC_POWER8E_v10, POWER8,
- "POWER8E v1.0")
- POWERPC_DEF("POWER8_v1.0", CPU_POWERPC_POWER8_v10, POWER8,
- "POWER8 v1.0")
+ POWERPC_DEF("POWER8E_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
+ "POWER8E v2.1")
+ POWERPC_DEF("POWER8_v2.0", CPU_POWERPC_POWER8_v20, POWER8,
+ "POWER8 v2.0")
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
@@ -1390,8 +1390,8 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "POWER5gs", "POWER5+_v2.1" },
{ "POWER7", "POWER7_v2.3" },
{ "POWER7+", "POWER7+_v2.1" },
- { "POWER8E", "POWER8E_v1.0" },
- { "POWER8", "POWER8_v1.0" },
+ { "POWER8E", "POWER8E_v2.1" },
+ { "POWER8", "POWER8_v2.0" },
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
diff --git a/target-ppc/cpu-models.h b/target-ppc/cpu-models.h
index 9d80e72..2992427 100644
--- a/target-ppc/cpu-models.h
+++ b/target-ppc/cpu-models.h
@@ -557,9 +557,9 @@ enum {
CPU_POWERPC_POWER7P_BASE = 0x004A0000,
CPU_POWERPC_POWER7P_v21 = 0x004A0201,
CPU_POWERPC_POWER8E_BASE = 0x004B0000,
- CPU_POWERPC_POWER8E_v10 = 0x004B0100,
+ CPU_POWERPC_POWER8E_v21 = 0x004B0201,
CPU_POWERPC_POWER8_BASE = 0x004D0000,
- CPU_POWERPC_POWER8_v10 = 0x004D0100,
+ CPU_POWERPC_POWER8_v20 = 0x004D0200,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 9706000..892f4dc9 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -419,6 +419,7 @@ typedef struct ppc_slb_t ppc_slb_t;
struct ppc_slb_t {
uint64_t esid;
uint64_t vsid;
+ const struct ppc_one_seg_page_size *sps;
};
#define MAX_SLB_ENTRIES 64
@@ -686,24 +687,43 @@ enum {
#define FP_FX (1ull << FPSCR_FX)
#define FP_FEX (1ull << FPSCR_FEX)
+#define FP_VX (1ull << FPSCR_VX)
#define FP_OX (1ull << FPSCR_OX)
-#define FP_OE (1ull << FPSCR_OE)
#define FP_UX (1ull << FPSCR_UX)
-#define FP_UE (1ull << FPSCR_UE)
-#define FP_XX (1ull << FPSCR_XX)
-#define FP_XE (1ull << FPSCR_XE)
#define FP_ZX (1ull << FPSCR_ZX)
-#define FP_ZE (1ull << FPSCR_ZE)
-#define FP_VX (1ull << FPSCR_VX)
+#define FP_XX (1ull << FPSCR_XX)
#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
#define FP_VXISI (1ull << FPSCR_VXISI)
-#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
-#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
#define FP_VXIDI (1ull << FPSCR_VXIDI)
+#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
+#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
#define FP_VXVC (1ull << FPSCR_VXVC)
+#define FP_FR (1ull << FSPCR_FR)
+#define FP_FI (1ull << FPSCR_FI)
+#define FP_C (1ull << FPSCR_C)
+#define FP_FL (1ull << FPSCR_FL)
+#define FP_FG (1ull << FPSCR_FG)
+#define FP_FE (1ull << FPSCR_FE)
+#define FP_FU (1ull << FPSCR_FU)
+#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
+#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
#define FP_VXCVI (1ull << FPSCR_VXCVI)
#define FP_VE (1ull << FPSCR_VE)
-#define FP_FI (1ull << FPSCR_FI)
+#define FP_OE (1ull << FPSCR_OE)
+#define FP_UE (1ull << FPSCR_UE)
+#define FP_ZE (1ull << FPSCR_ZE)
+#define FP_XE (1ull << FPSCR_XE)
+#define FP_NI (1ull << FPSCR_NI)
+#define FP_RN1 (1ull << FPSCR_RN1)
+#define FP_RN (1ull << FPSCR_RN)
+
+/* the exception bits which can be cleared by mcrfs - includes FX */
+#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
+ FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \
+ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
+ FP_VXSQRT | FP_VXCVI)
/*****************************************************************************/
/* Vector status and control register */
@@ -1210,7 +1230,7 @@ void ppc_store_msr (CPUPPCState *env, target_ulong value);
void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
int ppc_get_compat_smt_threads(PowerPCCPU *cpu);
-int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp);
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
@@ -2355,4 +2375,5 @@ int ppc_get_vcpu_dt_id(PowerPCCPU *cpu);
*/
PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id);
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
#endif /* !defined (__CPU_PPC_H__) */
diff --git a/target-ppc/gdbstub.c b/target-ppc/gdbstub.c
index ef4be23..569c380 100644
--- a/target-ppc/gdbstub.c
+++ b/target-ppc/gdbstub.c
@@ -88,7 +88,7 @@ static int ppc_gdb_register_len(int n)
the proper ordering for the binary, and cannot be changed.
For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
the current mode of the chip to see if we're running in little-endian. */
-static void maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
if (!msr_le) {
@@ -158,7 +158,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
break;
}
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
return r;
}
@@ -214,7 +214,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
break;
}
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
return r;
}
@@ -227,7 +227,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
if (!r) {
return r;
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
if (n < 32) {
/* gprs */
env->gpr[n] = ldtul_p(mem_buf);
@@ -277,7 +277,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
if (!r) {
return r;
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
if (n < 32) {
/* gprs */
env->gpr[n] = ldq_p(mem_buf);
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 869be15..e5a8f7b 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -544,6 +544,7 @@ DEF_HELPER_2(74xx_tlbd, void, env, tl)
DEF_HELPER_2(74xx_tlbi, void, env, tl)
DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_2(load_slb_esid, tl, env, tl)
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index c2e8912..70ca296 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -650,8 +650,13 @@ static int kvm_put_fp(CPUState *cs)
for (i = 0; i < 32; i++) {
uint64_t vsr[2];
+#ifdef HOST_WORDS_BIGENDIAN
vsr[0] = float64_val(env->fpr[i]);
vsr[1] = env->vsr[i];
+#else
+ vsr[0] = env->vsr[i];
+ vsr[1] = float64_val(env->fpr[i]);
+#endif
reg.addr = (uintptr_t) &vsr;
reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
@@ -721,10 +726,17 @@ static int kvm_get_fp(CPUState *cs)
vsx ? "VSR" : "FPR", i, strerror(errno));
return ret;
} else {
+#ifdef HOST_WORDS_BIGENDIAN
env->fpr[i] = vsr[0];
if (vsx) {
env->vsr[i] = vsr[1];
}
+#else
+ env->fpr[i] = vsr[1];
+ if (vsx) {
+ env->vsr[i] = vsr[0];
+ }
+#endif
}
}
}
@@ -1193,7 +1205,7 @@ int kvm_arch_get_registers(CPUState *cs)
* Only restore valid entries
*/
if (rb & SLB_ESID_V) {
- ppc_store_slb(env, rb, rs);
+ ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
}
}
#endif
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index 5e1333d..62406ce 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -98,11 +98,6 @@ static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_l
return -1;
}
-static inline int kvmppc_read_segment_page_sizes(uint32_t *prop, int maxcells)
-{
- return -1;
-}
-
static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
return -1;
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index 8e30b7a..692121e 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -3,6 +3,7 @@
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
+#include "mmu-hash64.h"
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
@@ -169,7 +170,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
env->lr = env->spr[SPR_LR];
env->ctr = env->spr[SPR_CTR];
- env->xer = env->spr[SPR_XER];
+ cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
env->cfar = env->spr[SPR_CFAR];
#endif
@@ -353,11 +354,30 @@ static bool slb_needed(void *opaque)
return (cpu->env.mmu_model & POWERPC_MMU_64);
}
+static int slb_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ /* We've pulled in the raw esid and vsid values from the migration
+ * stream, but we need to recompute the page size pointers */
+ for (i = 0; i < env->slb_nr; i++) {
+ if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
+ /* Migration source had bad values in its SLB */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static const VMStateDescription vmstate_slb = {
.name = "cpu/slb",
.version_id = 1,
.minimum_version_id = 1,
.needed = slb_needed,
+ .post_load = slb_post_load,
.fields = (VMStateField[]) {
VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
diff --git a/target-ppc/mmu-hash32.c b/target-ppc/mmu-hash32.c
index 022e947..b076d80 100644
--- a/target-ppc/mmu-hash32.c
+++ b/target-ppc/mmu-hash32.c
@@ -84,9 +84,10 @@ static int ppc_hash32_pp_prot(int key, int pp, int nx)
return prot;
}
-static int ppc_hash32_pte_prot(CPUPPCState *env,
+static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
target_ulong sr, ppc_hash_pte32_t pte)
{
+ CPUPPCState *env = &cpu->env;
unsigned pp, key;
key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
@@ -95,9 +96,11 @@ static int ppc_hash32_pte_prot(CPUPPCState *env,
return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}
-static target_ulong hash32_bat_size(CPUPPCState *env,
+static target_ulong hash32_bat_size(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
+ CPUPPCState *env = &cpu->env;
+
if ((msr_pr && !(batu & BATU32_VP))
|| (!msr_pr && !(batu & BATU32_VS))) {
return 0;
@@ -106,7 +109,7 @@ static target_ulong hash32_bat_size(CPUPPCState *env,
return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}
-static int hash32_bat_prot(CPUPPCState *env,
+static int hash32_bat_prot(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
int pp, prot;
@@ -122,7 +125,7 @@ static int hash32_bat_prot(CPUPPCState *env,
return prot;
}
-static target_ulong hash32_bat_601_size(CPUPPCState *env,
+static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
if (!(batl & BATL32_601_V)) {
@@ -132,9 +135,10 @@ static target_ulong hash32_bat_601_size(CPUPPCState *env,
return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}
-static int hash32_bat_601_prot(CPUPPCState *env,
+static int hash32_bat_601_prot(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
+ CPUPPCState *env = &cpu->env;
int key, pp;
pp = batu & BATU32_601_PP;
@@ -146,9 +150,10 @@ static int hash32_bat_601_prot(CPUPPCState *env,
return ppc_hash32_pp_prot(key, pp, 0);
}
-static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
+static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
int *prot)
{
+ CPUPPCState *env = &cpu->env;
target_ulong *BATlt, *BATut;
int i;
@@ -167,9 +172,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
target_ulong mask;
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- mask = hash32_bat_601_size(env, batu, batl);
+ mask = hash32_bat_601_size(cpu, batu, batl);
} else {
- mask = hash32_bat_size(env, batu, batl);
+ mask = hash32_bat_size(cpu, batu, batl);
}
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n", __func__,
@@ -179,9 +184,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
hwaddr raddr = (batl & mask) | (ea & ~mask);
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- *prot = hash32_bat_601_prot(env, batu, batl);
+ *prot = hash32_bat_601_prot(cpu, batu, batl);
} else {
- *prot = hash32_bat_prot(env, batu, batl);
+ *prot = hash32_bat_prot(cpu, batu, batl);
}
return raddr & TARGET_PAGE_MASK;
@@ -210,11 +215,12 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
return -1;
}
-static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
+static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
target_ulong eaddr, int rwx,
hwaddr *raddr, int *prot)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
@@ -294,12 +300,14 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
}
}
-hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash)
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
+ CPUPPCState *env = &cpu->env;
+
return (hash * HASH_PTEG_SIZE_32) & env->htab_mask;
}
-static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
+static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
bool secondary, target_ulong ptem,
ppc_hash_pte32_t *pte)
{
@@ -308,8 +316,8 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
int i;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- pte0 = ppc_hash32_load_hpte0(env, pte_offset);
- pte1 = ppc_hash32_load_hpte1(env, pte_offset);
+ pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+ pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
if ((pte0 & HPTE32_V_VALID)
&& (secondary == !!(pte0 & HPTE32_V_SECONDARY))
@@ -325,10 +333,11 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
return -1;
}
-static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
+static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
target_ulong sr, target_ulong eaddr,
ppc_hash_pte32_t *pte)
{
+ CPUPPCState *env = &cpu->env;
hwaddr pteg_off, pte_offset;
hwaddr hash;
uint32_t vsid, pgidx, ptem;
@@ -349,16 +358,16 @@ static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
" vsid=%" PRIx32 " ptem=%" PRIx32
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pteg_off = get_pteg_offset32(env, hash);
- pte_offset = ppc_hash32_pteg_search(env, pteg_off, 0, ptem, pte);
+ pteg_off = get_pteg_offset32(cpu, hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=%" PRIx32 " api=%" PRIx32
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pteg_off = get_pteg_offset32(env, ~hash);
- pte_offset = ppc_hash32_pteg_search(env, pteg_off, 1, ptem, pte);
+ pteg_off = get_pteg_offset32(cpu, ~hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
}
return pte_offset;
@@ -400,7 +409,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 2. Check Block Address Translation entries (BATs) */
if (env->nb_BATs != 0) {
- raddr = ppc_hash32_bat_lookup(env, eaddr, rwx, &prot);
+ raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
if (raddr != -1) {
if (need_prot[rwx] & ~prot) {
if (rwx == 2) {
@@ -431,7 +440,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 4. Handle direct store segments */
if (sr & SR32_T) {
- if (ppc_hash32_direct_store(env, sr, eaddr, rwx,
+ if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
&raddr, &prot) == 0) {
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
@@ -450,7 +459,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
}
/* 6. Locate the PTE in the hash table */
- pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISI;
@@ -473,7 +482,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 7. Check access permissions */
- prot = ppc_hash32_pte_prot(env, sr, pte);
+ prot = ppc_hash32_pte_prot(cpu, sr, pte);
if (need_prot[rwx] & ~prot) {
/* Access right violation */
@@ -508,7 +517,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
}
if (new_pte1 != pte.pte1) {
- ppc_hash32_store_hpte1(env, pte_offset, new_pte1);
+ ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
}
/* 9. Determine the real address from the PTE */
@@ -521,8 +530,9 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
return 0;
}
-hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
+ CPUPPCState *env = &cpu->env;
target_ulong sr;
hwaddr pte_offset;
ppc_hash_pte32_t pte;
@@ -534,7 +544,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
}
if (env->nb_BATs != 0) {
- hwaddr raddr = ppc_hash32_bat_lookup(env, eaddr, 0, &prot);
+ hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
if (raddr != -1) {
return raddr;
}
@@ -547,7 +557,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
return -1;
}
- pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
if (pte_offset == -1) {
return -1;
}
diff --git a/target-ppc/mmu-hash32.h b/target-ppc/mmu-hash32.h
index d515d4f..afbb9dd 100644
--- a/target-ppc/mmu-hash32.h
+++ b/target-ppc/mmu-hash32.h
@@ -3,8 +3,8 @@
#ifndef CONFIG_USER_ONLY
-hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash);
-hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
@@ -65,40 +65,42 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
#define HPTE32_R_WIMG 0x00000078
#define HPTE32_R_PP 0x00000003
-static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
+static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(cs->as, env->htab_base + pte_offset);
+ return ldl_phys(CPU(cpu)->as, env->htab_base + pte_offset);
}
-static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
+static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
+ return ldl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2);
}
-static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
+static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte0)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(cs->as, env->htab_base + pte_offset, pte0);
+ stl_phys(CPU(cpu)->as, env->htab_base + pte_offset, pte0);
}
-static inline void ppc_hash32_store_hpte1(CPUPPCState *env,
+static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte1)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
+ stl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
}
typedef struct {
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 8d28ed4..6d110ee 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -20,7 +20,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
@@ -41,8 +43,9 @@ bool kvmppc_kern_htab;
* SLB handling
*/
-static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
+static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
+ CPUPPCState *env = &cpu->env;
uint64_t esid_256M, esid_1T;
int n;
@@ -70,12 +73,13 @@ static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
return NULL;
}
-void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
int i;
uint64_t slbe, slbv;
- cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));
+ cpu_synchronize_state(CPU(cpu));
cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
for (i = 0; i < env->slb_nr; i++) {
@@ -118,7 +122,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_slb_t *slb;
- slb = slb_lookup(env, addr);
+ slb = slb_lookup(cpu, addr);
if (!slb) {
return;
}
@@ -134,35 +138,62 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
}
}
-int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid)
{
- int slot = rb & 0xfff;
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb = &env->slb[slot];
+ const struct ppc_one_seg_page_size *sps = NULL;
+ int i;
- if (rb & (0x1000 - env->slb_nr)) {
- return -1; /* Reserved bits set or slot too high */
+ if (slot >= env->slb_nr) {
+ return -1; /* Bad slot number */
+ }
+ if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+ return -1; /* Reserved bits set */
}
- if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+ if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
return -1; /* Bad segment size */
}
- if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+ if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
return -1; /* 1T segment on MMU that doesn't support it */
}
- /* Mask out the slot number as we store the entry */
- slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
- slb->vsid = rs;
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
+ " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
+ slot, esid, vsid);
+ return -1;
+ }
+
+ slb->esid = esid;
+ slb->vsid = vsid;
+ slb->sps = sps;
LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
- " %016" PRIx64 "\n", __func__, slot, rb, rs,
+ " %016" PRIx64 "\n", __func__, slot, esid, vsid,
slb->esid, slb->vsid);
return 0;
}
-static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
+static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
target_ulong *rt)
{
+ CPUPPCState *env = &cpu->env;
int slot = rb & 0xfff;
ppc_slb_t *slb = &env->slb[slot];
@@ -174,9 +205,10 @@ static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
return 0;
}
-static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
+static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
target_ulong *rt)
{
+ CPUPPCState *env = &cpu->env;
int slot = rb & 0xfff;
ppc_slb_t *slb = &env->slb[slot];
@@ -190,7 +222,9 @@ static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
- if (ppc_store_slb(env, rb, rs) < 0) {
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -198,9 +232,10 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
target_ulong rt = 0;
- if (ppc_load_slb_esid(env, rb, &rt) < 0) {
+ if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -209,9 +244,10 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
target_ulong rt = 0;
- if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
+ if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -222,9 +258,10 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
* 64-bit hash table MMU handling
*/
-static int ppc_hash64_pte_prot(CPUPPCState *env,
+static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
+ CPUPPCState *env = &cpu->env;
unsigned pp, key;
/* Some pp bit combinations have undefined behaviour, so default
* to no access in those cases */
@@ -274,12 +311,12 @@ static int ppc_hash64_pte_prot(CPUPPCState *env,
return prot;
}
-static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
+static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
+ CPUPPCState *env = &cpu->env;
int key, amrbits;
int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-
/* Only recent MMUs implement Virtual Page Class Key Protection */
if (!(env->mmu_model & POWERPC_MMU_AMR)) {
return prot;
@@ -348,23 +385,24 @@ void ppc_hash64_stop_access(uint64_t token)
}
}
-static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
+static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
bool secondary, target_ulong ptem,
ppc_hash_pte64_t *pte)
{
+ CPUPPCState *env = &cpu->env;
int i;
uint64_t token;
target_ulong pte0, pte1;
target_ulong pte_index;
pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
- token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
+ token = ppc_hash64_start_access(cpu, pte_index);
if (!token) {
return -1;
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
- pte0 = ppc_hash64_load_hpte0(env, token, i);
- pte1 = ppc_hash64_load_hpte1(env, token, i);
+ pte0 = ppc_hash64_load_hpte0(cpu, token, i);
+ pte1 = ppc_hash64_load_hpte1(cpu, token, i);
if ((pte0 & HPTE64_V_VALID)
&& (secondary == !!(pte0 & HPTE64_V_SECONDARY))
@@ -382,45 +420,31 @@ static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
return -1;
}
-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
-{
- uint64_t epnshift;
-
- /* Page size according to the SLB, which we use to generate the
- * EPN for hash table lookup.. When we implement more recent MMU
- * extensions this might be different from the actual page size
- * encoded in the PTE */
- if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
- epnshift = TARGET_PAGE_BITS;
- } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
- epnshift = TARGET_PAGE_BITS_64K;
- } else {
- epnshift = TARGET_PAGE_BITS_16M;
- }
- return epnshift;
-}
-
-static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
+static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
ppc_slb_t *slb, target_ulong eaddr,
ppc_hash_pte64_t *pte)
{
+ CPUPPCState *env = &cpu->env;
hwaddr pte_offset;
hwaddr hash;
- uint64_t vsid, epnshift, epnmask, epn, ptem;
+ uint64_t vsid, epnmask, epn, ptem;
+
+ /* The SLB store path should prevent any bad page size encodings
+ * getting in there, so: */
+ assert(slb->sps);
- epnshift = ppc_hash64_page_shift(slb);
- epnmask = ~((1ULL << epnshift) - 1);
+ epnmask = ~((1ULL << slb->sps->page_shift) - 1);
if (slb->vsid & SLB_VSID_B) {
/* 1TB segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
- hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
+ hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
} else {
/* 256M segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
- hash = vsid ^ (epn >> epnshift);
+ hash = vsid ^ (epn >> slb->sps->page_shift);
}
ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
@@ -436,7 +460,7 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
@@ -446,24 +470,82 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
}
return pte_offset;
}
-static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
- target_ulong eaddr)
+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ if (sps->page_shift != 12) {
+ /* 4kiB page in a non 4kiB segment */
+ return 0;
+ }
+ /* Normal 4kiB page */
+ return 12;
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_page_size *ps = &sps->enc[i];
+ uint64_t mask;
+
+ if (!ps->page_shift) {
+ break;
+ }
+
+ if (ps->page_shift == 12) {
+ /* L bit is set so this can't be a 4kiB page */
+ continue;
+ }
+
+ mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+ if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+ return ps->page_shift;
+ }
+ }
+
+ return 0; /* Bad page size encoding */
+}
+
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1,
+ unsigned *seg_page_shift)
{
- hwaddr mask;
- int target_page_bits;
- hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ *seg_page_shift = 12;
+ return 12;
+ }
+
/*
- * We support 4K, 64K and 16M now
+ * The encodings in env->sps need to be carefully chosen so that
+ * this gives an unambiguous result.
*/
- target_page_bits = ppc_hash64_page_shift(slb);
- mask = (1ULL << target_page_bits) - 1;
- return (rpn & ~mask) | (eaddr & mask);
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+ unsigned shift;
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ shift = hpte_page_shift(sps, pte0, pte1);
+ if (shift) {
+ *seg_page_shift = sps->page_shift;
+ return shift;
+ }
+ }
+
+ *seg_page_shift = 0;
+ return 0;
}
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
@@ -472,6 +554,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
+ unsigned apshift;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
int pp_prot, amr_prot, prot;
@@ -493,7 +576,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
/* 2. Translation is on, so look up the SLB */
- slb = slb_lookup(env, eaddr);
+ slb = slb_lookup(cpu, eaddr);
if (!slb) {
if (rwx == 2) {
@@ -515,7 +598,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
/* 4. Locate the PTE in the hash table */
- pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISI;
@@ -535,10 +618,22 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
qemu_log_mask(CPU_LOG_MMU,
"found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
+ /* Validate page size encoding */
+ apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+ if (!apshift) {
+ error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
+ " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
+ /* Not entirely sure what the right action here, but machine
+ * check seems reasonable */
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ return 1;
+ }
+
/* 5. Check access permissions */
- pp_prot = ppc_hash64_pte_prot(env, slb, pte);
- amr_prot = ppc_hash64_amr_prot(env, pte);
+ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
+ amr_prot = ppc_hash64_amr_prot(cpu, pte);
prot = pp_prot & amr_prot;
if ((need_prot[rwx] & ~prot) != 0) {
@@ -581,49 +676,57 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
if (new_pte1 != pte.pte1) {
- ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
+ ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
pte.pte0, new_pte1);
}
/* 7. Determine the real address from the PTE */
- raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
+ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, mmu_idx, 1ULL << apshift);
return 0;
}
-hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
+ unsigned apshift;
if (msr_dr == 0) {
/* In real mode the top 4 effective address bits are ignored */
return addr & 0x0FFFFFFFFFFFFFFFULL;
}
- slb = slb_lookup(env, addr);
+ slb = slb_lookup(cpu, addr);
if (!slb) {
return -1;
}
- pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
if (pte_offset == -1) {
return -1;
}
- return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
+ apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+ if (!apshift) {
+ return -1;
+ }
+
+ return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
+ & TARGET_PAGE_MASK;
}
-void ppc_hash64_store_hpte(CPUPPCState *env,
+void ppc_hash64_store_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
if (kvmppc_kern_htab) {
kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
@@ -633,9 +736,22 @@ void ppc_hash64_store_hpte(CPUPPCState *env,
pte_index *= HASH_PTE_SIZE_64;
if (env->external_htab) {
stq_p(env->external_htab + pte_index, pte0);
- stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
+ stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
} else {
- stq_phys(cs->as, env->htab_base + pte_index, pte0);
- stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
+ stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
+ stq_phys(CPU(cpu)->as,
+ env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
}
}
+
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ /*
+ * XXX: given the fact that there are too many segments to
+ * invalidate, and we still don't have a tlb_flush_mask(env, n,
+ * mask) in QEMU, we just invalidate all TLBs
+ */
+ tlb_flush(CPU(cpu), 1);
+}
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 291750f..ab0f86b 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -4,13 +4,21 @@
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_PPC64
-void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
-int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
-hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
+void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid);
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
-void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
+void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
target_ulong pte0, target_ulong pte1);
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1,
+ unsigned *seg_page_shift);
#endif
/*
@@ -40,6 +48,8 @@ void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
#define SLB_VSID_4K 0x0000000000000000ULL
#define SLB_VSID_64K 0x0000000000000110ULL
+#define SLB_VSID_16M 0x0000000000000100ULL
+#define SLB_VSID_16G 0x0000000000000120ULL
/*
* Hash page table definitions
@@ -85,31 +95,31 @@ extern bool kvmppc_kern_htab;
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
void ppc_hash64_stop_access(uint64_t token);
-static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
+static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
uint64_t token, int index)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
uint64_t addr;
addr = token + (index * HASH_PTE_SIZE_64);
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
- return ldq_phys(cs->as, addr);
+ return ldq_phys(CPU(cpu)->as, addr);
}
}
-static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
+static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
uint64_t token, int index)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
uint64_t addr;
addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
- return ldq_phys(cs->as, addr);
+ return ldq_phys(CPU(cpu)->as, addr);
}
}
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index d4f7926..de4e286 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -658,32 +658,6 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
tlb_flush(CPU(cpu), 1);
}
-static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
- target_ulong eaddr, uint32_t pid)
-{
-#if !defined(FLUSH_ALL_TLBS)
- CPUState *cs = CPU(ppc_env_get_cpu(env));
- ppcemb_tlb_t *tlb;
- hwaddr raddr;
- target_ulong page, end;
- int i;
-
- for (i = 0; i < env->nb_tlb; i++) {
- tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
- end = tlb->EPN + tlb->size;
- for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(cs, page);
- }
- tlb->prot &= ~PAGE_VALID;
- break;
- }
- }
-#else
- ppc4xx_tlb_invalidate_all(env);
-#endif
-}
-
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong address, int rw,
int access_type)
@@ -1298,7 +1272,7 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_07:
case POWERPC_MMU_2_07a:
- dump_slb(f, cpu_fprintf, env);
+ dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
break;
#endif
default:
@@ -1440,12 +1414,12 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_07:
case POWERPC_MMU_2_07a:
- return ppc_hash64_get_phys_page_debug(env, addr);
+ return ppc_hash64_get_phys_page_debug(cpu, addr);
#endif
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
- return ppc_hash32_get_phys_page_debug(env, addr);
+ return ppc_hash32_get_phys_page_debug(cpu, addr);
default:
;
@@ -1511,6 +1485,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
mmu_ctx_t ctx;
int access_type;
int ret = 0;
@@ -1612,9 +1587,9 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
tlb_miss:
env->error_code |= ctx.key << 19;
env->spr[SPR_HASH1] = env->htab_base +
- get_pteg_offset32(env, ctx.hash[0]);
+ get_pteg_offset32(cpu, ctx.hash[0]);
env->spr[SPR_HASH2] = env->htab_base +
- get_pteg_offset32(env, ctx.hash[1]);
+ get_pteg_offset32(cpu, ctx.hash[1]);
break;
case POWERPC_MMU_SOFT_74xx:
if (rw == 1) {
@@ -1971,25 +1946,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
ppc6xx_tlb_invalidate_virt(env, addr, 1);
}
break;
- case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
- ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
- break;
- case POWERPC_MMU_REAL:
- cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
- break;
- case POWERPC_MMU_MPC8xx:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
- break;
- case POWERPC_MMU_BOOKE:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
- break;
- case POWERPC_MMU_BOOKE206:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "BookE 2.06 MMU model is not implemented\n");
- break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
/* tlbie invalidate TLBs for all segments */
@@ -2031,9 +1987,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
#endif /* defined(TARGET_PPC64) */
default:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "Unknown MMU model\n");
- break;
+ /* Should never reach here with other MMU models */
+ assert(0);
}
#else
ppc_tlb_invalidate_all(env);
@@ -2088,21 +2043,17 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
(int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
- uint64_t rb = 0, rs = 0;
+ uint64_t esid, vsid;
/* ESID = srnum */
- rb |= ((uint32_t)srnum & 0xf) << 28;
- /* Set the valid bit */
- rb |= SLB_ESID_V;
- /* Index = ESID */
- rb |= (uint32_t)srnum;
+ esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
/* VSID = VSID */
- rs |= (value & 0xfffffff) << 12;
+ vsid = (value & 0xfffffff) << 12;
/* flags = flags */
- rs |= ((value >> 27) & 0xf) << 8;
+ vsid |= ((value >> 27) & 0xf) << 8;
- ppc_store_slb(env, rb, rs);
+ ppc_store_slb(cpu, srnum, esid, vsid);
} else
#endif
if (env->sr[srnum] != value) {
@@ -2136,6 +2087,16 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr)
ppc_tlb_invalidate_one(env, addr);
}
+void helper_tlbiva(CPUPPCState *env, target_ulong addr)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ /* tlbiva instruction only exists on BookE */
+ assert(env->mmu_model == POWERPC_MMU_BOOKE);
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
+}
+
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 3beeb45..7db3145 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -2501,18 +2501,31 @@ static void gen_fmrgow(DisasContext *ctx)
static void gen_mcrfs(DisasContext *ctx)
{
TCGv tmp = tcg_temp_new();
+ TCGv_i32 tmask;
+ TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
int bfa;
+ int nibble;
+ int shift;
if (unlikely(!ctx->fpu_enabled)) {
gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- bfa = 4 * (7 - crfS(ctx->opcode));
- tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
+ bfa = crfS(ctx->opcode);
+ nibble = 7 - bfa;
+ shift = 4 * nibble;
+ tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
- tcg_temp_free(tmp);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
- tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
+ tcg_temp_free(tmp);
+ tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+ /* Only the exception bits (including FX) should be cleared if read */
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ /* FEX and VX need to be updated, so don't set fpscr directly */
+ tmask = tcg_const_i32(1 << nibble);
+ gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ tcg_temp_free_i32(tmask);
+ tcg_temp_free_i64(tnew_fpscr);
}
/* mffs */
@@ -5905,7 +5918,7 @@ static void gen_tlbiva(DisasContext *ctx)
}
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
tcg_temp_free(t0);
#endif
}
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 76d5da1..cdd18ac 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -8105,6 +8105,36 @@ static Property powerpc_servercpu_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
+#ifdef CONFIG_SOFTMMU
+static const struct ppc_segment_page_sizes POWER7_POWER8_sps = {
+ .sps = {
+ {
+ .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 },
+ { .page_shift = 16, .pte_enc = 0x7 },
+ { .page_shift = 24, .pte_enc = 0x38 }, },
+ },
+ {
+ .page_shift = 16, /* 64K */
+ .slb_enc = SLB_VSID_64K,
+ .enc = { { .page_shift = 16, .pte_enc = 0x1 },
+ { .page_shift = 24, .pte_enc = 0x8 }, },
+ },
+ {
+ .page_shift = 24, /* 16M */
+ .slb_enc = SLB_VSID_16M,
+ .enc = { { .page_shift = 24, .pte_enc = 0 }, },
+ },
+ {
+ .page_shift = 34, /* 16G */
+ .slb_enc = SLB_VSID_16G,
+ .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
+ },
+ }
+};
+#endif /* CONFIG_SOFTMMU */
+
static void init_proc_POWER7 (CPUPPCState *env)
{
init_proc_book3s_64(env, BOOK3S_CPU_POWER7);
@@ -8168,6 +8198,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
#endif
pcc->excp_model = POWERPC_EXCP_POWER7;
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
@@ -8248,6 +8279,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->mmu_model = POWERPC_MMU_2_07;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
#endif
pcc->excp_model = POWERPC_EXCP_POWER7;
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
@@ -8750,14 +8782,25 @@ static void dump_ppc_insns (CPUPPCState *env)
}
#endif
+static bool avr_need_swap(CPUPPCState *env)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ return msr_le;
+#else
+ return !msr_le;
+#endif
+}
+
static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
stfq_p(mem_buf, env->fpr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
return 8;
}
if (n == 32) {
stl_p(mem_buf, env->fpscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
@@ -8766,10 +8809,12 @@ static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
env->fpr[n] = ldfq_p(mem_buf);
return 8;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff);
return 4;
}
@@ -8779,21 +8824,25 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
-#ifdef HOST_WORDS_BIGENDIAN
- stq_p(mem_buf, env->avr[n].u64[0]);
- stq_p(mem_buf+8, env->avr[n].u64[1]);
-#else
- stq_p(mem_buf, env->avr[n].u64[1]);
- stq_p(mem_buf+8, env->avr[n].u64[0]);
-#endif
+ if (!avr_need_swap(env)) {
+ stq_p(mem_buf, env->avr[n].u64[0]);
+ stq_p(mem_buf+8, env->avr[n].u64[1]);
+ } else {
+ stq_p(mem_buf, env->avr[n].u64[1]);
+ stq_p(mem_buf+8, env->avr[n].u64[0]);
+ }
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
return 16;
}
if (n == 32) {
stl_p(mem_buf, env->vscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
if (n == 33) {
stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
@@ -8802,20 +8851,24 @@ static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
-#ifdef HOST_WORDS_BIGENDIAN
- env->avr[n].u64[0] = ldq_p(mem_buf);
- env->avr[n].u64[1] = ldq_p(mem_buf+8);
-#else
- env->avr[n].u64[1] = ldq_p(mem_buf);
- env->avr[n].u64[0] = ldq_p(mem_buf+8);
-#endif
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
+ if (!avr_need_swap(env)) {
+ env->avr[n].u64[0] = ldq_p(mem_buf);
+ env->avr[n].u64[1] = ldq_p(mem_buf+8);
+ } else {
+ env->avr[n].u64[1] = ldq_p(mem_buf);
+ env->avr[n].u64[0] = ldq_p(mem_buf+8);
+ }
return 16;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->vscr = ldl_p(mem_buf);
return 4;
}
if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
return 4;
}
@@ -8827,6 +8880,7 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
if (n < 32) {
#if defined(TARGET_PPC64)
stl_p(mem_buf, env->gpr[n] >> 32);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
#else
stl_p(mem_buf, env->gprh[n]);
#endif
@@ -8834,10 +8888,12 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
}
if (n == 32) {
stq_p(mem_buf, env->spe_acc);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
return 8;
}
if (n == 33) {
stl_p(mem_buf, env->spe_fscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
@@ -8848,7 +8904,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
if (n < 32) {
#if defined(TARGET_PPC64)
target_ulong lo = (uint32_t)env->gpr[n];
- target_ulong hi = (target_ulong)ldl_p(mem_buf) << 32;
+ target_ulong hi;
+
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+
+ hi = (target_ulong)ldl_p(mem_buf) << 32;
env->gpr[n] = lo | hi;
#else
env->gprh[n] = ldl_p(mem_buf);
@@ -8856,16 +8916,38 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 4;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
env->spe_acc = ldq_p(mem_buf);
return 8;
}
if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->spe_fscr = ldl_p(mem_buf);
return 4;
}
return 0;
}
+static int gdb_get_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ stq_p(mem_buf, env->vsr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ return 0;
+}
+
+static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->vsr[n] = ldq_p(mem_buf);
+ return 8;
+ }
+ return 0;
+}
+
static int ppc_fixup_cpu(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
@@ -8971,6 +9053,10 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
34, "power-spe.xml", 0);
}
+ if (pcc->insns_flags2 & PPC2_VSX) {
+ gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
+ 32, "power-vsx.xml", 0);
+ }
qemu_init_vcpu(cs);
@@ -9185,7 +9271,7 @@ int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
return ret;
}
-int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
{
int ret = 0;
CPUPPCState *env = &cpu->env;
@@ -9207,12 +9293,13 @@ int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
break;
}
- if (kvm_enabled() && kvmppc_set_compat(cpu, cpu->cpu_version) < 0) {
- error_report("Unable to set compatibility mode in KVM");
- ret = -1;
+ if (kvm_enabled()) {
+ ret = kvmppc_set_compat(cpu, cpu->cpu_version);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Unable to set CPU compatibility mode in KVM");
+ }
}
-
- return ret;
}
static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
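
Editor's note on the gdbstub changes above: every register get/set path now
funnels through ppc_maybe_bswap_register(), which swaps the buffer only when
the guest's current endianness differs from gdb's expectation. A minimal
sketch of the getter pattern, assuming a hypothetical accessor name
(gdb_get_example_reg); the value used (env->spe_acc) is taken from the
gdb_get_spe_reg() hunk above.

    /* Sketch of the getter pattern used above; hypothetical function,
     * not part of the patch. */
    static int gdb_get_example_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
    {
        stq_p(mem_buf, env->spe_acc);              /* store in target byte order */
        ppc_maybe_bswap_register(env, mem_buf, 8); /* swap iff guest is little-endian */
        return 8;
    }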