author    David Gibson <david@gibson.dropbear.id.au>    2013-03-12 00:31:08 +0000
committer Alexander Graf <agraf@suse.de>                2013-03-22 15:28:47 +0100
commit    c69b6151e7f242b02f261f321c392e5ef933176f (patch)
tree      37ba5ff96619d955b11ee89ac2aa8cf499c734f8 /target-ppc/mmu-hash64.c
parent    9d7c3f4a2935a70e7299a6862792bbfc48d62211 (diff)
target-ppc: Disentangle find_pte()
32-bit and 64-bit hash MMU implementations currently share a find_pte() function. This results in a whole bunch of ugly conditionals in the shared function, and not all that much actually shared code. This patch separates out the 32-bit and 64-bit versions, putting them in mmu-hash64.c and mmu-hash32.c respectively, and removes the conditionals from both versions.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
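The remaining shared caller lives in mmu_helper.c and is outside this diffstat, so it is not shown here. As a rough sketch (not taken from this patch, and assuming the dispatch keys off the POWERPC_MMU_64 bit in env->mmu_model as other hash-MMU code does), the split leaves the common entry point as little more than a thin wrapper:

static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h,
                           int rw, int type, int target_page_bits)
{
#if defined(TARGET_PPC64)
    /* 64-bit hash MMUs use the implementation added below in mmu-hash64.c */
    if (env->mmu_model & POWERPC_MMU_64) {
        return find_pte64(env, ctx, h, rw, type, target_page_bits);
    }
#endif
    /* Everything else falls back to the 32-bit variant in mmu-hash32.c */
    return find_pte32(env, ctx, h, rw, type, target_page_bits);
}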
Diffstat (limited to 'target-ppc/mmu-hash64.c')
-rw-r--r--    target-ppc/mmu-hash64.c    79
1 file changed, 77 insertions, 2 deletions
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 9c0de1b..a525bd5 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -231,8 +231,8 @@ static inline int pte64_is_valid(target_ulong pte0)
     return pte0 & 0x0000000000000001ULL ? 1 : 0;
 }
 
-int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
-                target_ulong pte1, int h, int rw, int type)
+static int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
+                       target_ulong pte1, int h, int rw, int type)
 {
     target_ulong ptem, mmask;
     int access, ret, pteh, ptev, pp;
@@ -274,3 +274,78 @@ int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
     return ret;
 }
+
+/* PTE table lookup */
+int find_pte64(CPUPPCState *env, mmu_ctx_t *ctx, int h,
+               int rw, int type, int target_page_bits)
+{
+    hwaddr pteg_off;
+    target_ulong pte0, pte1;
+    int i, good = -1;
+    int ret, r;
+
+    ret = -1; /* No entry found */
+    pteg_off = get_pteg_offset(env, ctx->hash[h], HASH_PTE_SIZE_64);
+    for (i = 0; i < 8; i++) {
+        if (env->external_htab) {
+            pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
+            pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
+        } else {
+            pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
+            pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
+        }
+
+        r = pte64_check(ctx, pte0, pte1, h, rw, type);
+        LOG_MMU("Load pte from %016" HWADDR_PRIx " => " TARGET_FMT_lx " "
+                TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
+                pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
+                (int)((pte0 >> 1) & 1), ctx->ptem);
+        switch (r) {
+        case -3:
+            /* PTE inconsistency */
+            return -1;
+        case -2:
+            /* Access violation */
+            ret = -2;
+            good = i;
+            break;
+        case -1:
+        default:
+            /* No PTE match */
+            break;
+        case 0:
+            /* access granted */
+            /* XXX: we could keep looping to check that all PTEs are
+             *      consistent, but we skip that for speed, since the
+             *      result would be undefined anyway if they are not.
+             */
+            ret = 0;
+            good = i;
+            goto done;
+        }
+    }
+    if (good != -1) {
+    done:
+        LOG_MMU("found PTE at addr %08" HWADDR_PRIx " prot=%01x ret=%d\n",
+                ctx->raddr, ctx->prot, ret);
+        /* Update page flags */
+        pte1 = ctx->raddr;
+        if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
+            if (env->external_htab) {
+                stq_p(env->external_htab + pteg_off + (good * 16) + 8,
+                      pte1);
+            } else {
+                stq_phys_notdirty(env->htab_base + pteg_off +
+                                  (good * 16) + 8, pte1);
+            }
+        }
+    }
+
+    /* We have a TLB that saves 4K pages, so let's
+     * split a huge page to 4k chunks */
+    if (target_page_bits != TARGET_PAGE_BITS) {
+        ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
+            & TARGET_PAGE_MASK;
+    }
+    return ret;
+}
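The 32-bit counterpart that the commit message places in mmu-hash32.c is excluded from this diffstat. A minimal sketch of how it might look, assuming find_pte32(), pte32_check() and HASH_PTE_SIZE_32 mirror the 64-bit helpers above, and that the 32-bit path reads two-word PTEs straight from guest memory with no external HTAB or huge-page cases to handle:

int find_pte32(CPUPPCState *env, mmu_ctx_t *ctx, int h,
               int rw, int type, int target_page_bits)
{
    hwaddr pteg_off;
    target_ulong pte0, pte1;
    int i, good = -1;
    int ret, r;

    ret = -1; /* No entry found */
    pteg_off = get_pteg_offset(env, ctx->hash[h], HASH_PTE_SIZE_32);
    for (i = 0; i < 8; i++) {
        /* Each 32-bit PTE is two words read straight from the hash table */
        pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
        pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);

        r = pte32_check(ctx, pte0, pte1, h, rw, type);
        switch (r) {
        case -3:
            /* PTE inconsistency */
            return -1;
        case -2:
            /* Access violation: remember the slot, keep looking */
            ret = -2;
            good = i;
            break;
        case -1:
        default:
            /* No PTE match */
            break;
        case 0:
            /* Access granted */
            ret = 0;
            good = i;
            goto done;
        }
    }
    if (good != -1) {
    done:
        /* Update the reference/change bits of the matching PTE */
        pte1 = ctx->raddr;
        if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
            stl_phys_notdirty(env->htab_base + pteg_off + (good * 8) + 4,
                              pte1);
        }
    }
    /* target_page_bits is kept only so callers can use one signature for
     * both variants; no 4K split is done in this 32-bit sketch */
    return ret;
}

The point of the split is visible in the comparison: each variant uses its natural PTE size and load width, instead of the 32/64-bit conditionals the old shared find_pte() carried.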