author     David Gibson <david@gibson.dropbear.id.au>    2016-01-27 12:01:20 +1100
committer  David Gibson <david@gibson.dropbear.id.au>    2016-01-30 23:49:27 +1100
commit     1114e712c998d6c6d888e8a22aab94e143ae3fd8 (patch)
tree       3dbd651703d54b6f26135d1990c648b607c9d860
parent     61a36c9b5a12889994e6c45f4a175efcd63936db (diff)
target-ppc: Helper to determine page size information from hpte alone
h_enter() in the spapr code needs to know the page size of the HPTE it's
about to insert.  Unlike other paths that do this, it doesn't have access
to the SLB, so at the moment it determines this with some open-coded tests
which assume POWER7 or POWER8 page size encodings.

To make this more flexible, add ppc_hash64_hpte_page_shift_noslb() to
determine both the "base" page size per segment, and the individual
effective page size from an HPTE alone.  This means that the spapr code
should now be able to handle any page size listed in the env->sps table.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  hw/ppc/spapr_hcall.c     | 25
-rw-r--r--  target-ppc/mmu-hash64.c  | 35
-rw-r--r--  target-ppc/mmu-hash64.h  |  3
3 files changed, 44 insertions(+), 19 deletions(-)
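The removed open-coded tests and the new helper embody the same idea: match
the low bits of the second HPTE word against per-size encodings. Below is a
minimal, self-contained sketch of that idea using only the two encodings the
old code knew about ((ptel & 0xf000) == 0x1000 for 64kiB, (ptel & 0xff000)
== 0 for 16MiB, both taken from the removed lines). All toy_* names are
illustrative, not QEMU API; the real helper instead walks env->sps and
delegates the matching to hpte_page_shift(), as shown in the diff that
follows.

#include <stdint.h>

/* HPTE64_V_LARGE as defined in target-ppc/mmu-hash64.h */
#define HPTE64_V_LARGE 0x0000000000000004ULL

/* One (mask, value, shift) triple per supported large-page encoding */
struct toy_enc {
    uint64_t mask;
    uint64_t val;
    unsigned shift;
};

static const struct toy_enc toy_table[] = {
    { 0x0f000, 0x1000, 16 }, /* 64kiB */
    { 0xff000, 0x0000, 24 }, /* 16MiB */
};

static unsigned toy_hpte_page_shift(uint64_t pte0, uint64_t pte1)
{
    unsigned i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;              /* not a large page: must be 4kiB */
    }
    for (i = 0; i < sizeof(toy_table) / sizeof(toy_table[0]); i++) {
        if ((pte1 & toy_table[i].mask) == toy_table[i].val) {
            return toy_table[i].shift;
        }
    }
    return 0;                   /* bad page size encoding */
}

Note that, as the comment in the new helper warns, this only works if the
encodings are unambiguous: here a 64kiB PTE (low bits 0x1000) can never
match the 16MiB entry's mask/value pair, and vice versa.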
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 0a8378c..12f8c33 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -73,31 +73,18 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     target_ulong pte_index = args[1];
     target_ulong pteh = args[2];
     target_ulong ptel = args[3];
-    target_ulong page_shift = 12;
+    unsigned apshift, spshift;
     target_ulong raddr;
     target_ulong index;
     uint64_t token;
 
-    /* only handle 4k and 16M pages for now */
-    if (pteh & HPTE64_V_LARGE) {
-#if 0 /* We don't support 64k pages yet */
-        if ((ptel & 0xf000) == 0x1000) {
-            /* 64k page */
-        } else
-#endif
-        if ((ptel & 0xff000) == 0) {
-            /* 16M page */
-            page_shift = 24;
-            /* lowest AVA bit must be 0 for 16M pages */
-            if (pteh & 0x80) {
-                return H_PARAMETER;
-            }
-        } else {
-            return H_PARAMETER;
-        }
+    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
+    if (!apshift) {
+        /* Bad page size encoding */
+        return H_PARAMETER;
     }
 
-    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);
+    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
 
     if (is_ram_address(spapr, raddr)) {
         /* Regular RAM - should have WIMG=0010 */
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 565a0f4..6d110ee 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -513,6 +513,41 @@ static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
     return 0; /* Bad page size encoding */
 }
 
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+                                          uint64_t pte0, uint64_t pte1,
+                                          unsigned *seg_page_shift)
+{
+    CPUPPCState *env = &cpu->env;
+    int i;
+
+    if (!(pte0 & HPTE64_V_LARGE)) {
+        *seg_page_shift = 12;
+        return 12;
+    }
+
+    /*
+     * The encodings in env->sps need to be carefully chosen so that
+     * this gives an unambiguous result.
+     */
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+        unsigned shift;
+
+        if (!sps->page_shift) {
+            break;
+        }
+
+        shift = hpte_page_shift(sps, pte0, pte1);
+        if (shift) {
+            *seg_page_shift = sps->page_shift;
+            return shift;
+        }
+    }
+
+    *seg_page_shift = 0;
+    return 0;
+}
+
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                 int rwx, int mmu_idx)
 {
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 293a951..34cf975 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -16,6 +16,9 @@ void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte_index,
                                target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+                                          uint64_t pte0, uint64_t pte1,
+                                          unsigned *seg_page_shift);
 #endif
 
 /*
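For completeness, a hedged sketch of how a caller uses the new helper,
mirroring the h_enter() hunk above; `cpu`, `pteh` and `ptel` are assumed to
come from the caller's context exactly as in that hunk:

    unsigned apshift, spshift;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* No encoding in env->sps matched: reject the HPTE */
        return H_PARAMETER;
    }
    /*
     * apshift is the actual page shift encoded in the HPTE; spshift is
     * the base page shift of the segment page size it matched.  E.g. for
     * a 16MiB page apshift == 24, so ~((1ULL << apshift) - 1) clears the
     * low 24 bits and yields a 16MiB-aligned real address.
     */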