aboutsummaryrefslogtreecommitdiff
path: root/target
diff options
context:
space:
mode:
authorCédric Le Goater <clg@kaod.org>2020-04-03 16:00:53 +0200
committerDavid Gibson <david@gibson.dropbear.id.au>2020-05-07 11:10:50 +1000
commitd92baf00aad9bba521c2b3cb23fe388c2067aaa0 (patch)
treef911a28822ff1481776606f867508ccfbc6733f2 /target
parent05af7c77f5f24ef2bec25f2cab22170c29edaf37 (diff)
downloadqemu-d92baf00aad9bba521c2b3cb23fe388c2067aaa0.zip
qemu-d92baf00aad9bba521c2b3cb23fe388c2067aaa0.tar.gz
qemu-d92baf00aad9bba521c2b3cb23fe388c2067aaa0.tar.bz2
target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation
This is moving code under a new ppc_radix64_xlate() routine shared by the MMU Radix page fault handler and the 'get_phys_page_debug' PPC callback. The difference being that 'get_phys_page_debug' does not generate exceptions. The specific part of process-scoped Radix translation is moved under ppc_radix64_process_scoped_xlate() in preparation of the future support for partition-scoped Radix translation. Routines raising the exceptions now take a 'cause_excp' bool to cover the 'get_phys_page_debug' case. It should be functionally equivalent. Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com> Signed-off-by: Cédric Le Goater <clg@kaod.org> Message-Id: <20200403140056.59465-2-clg@kaod.org> Reviewed-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'target')
-rw-r--r--target/ppc/mmu-radix64.c219
1 file changed, 123 insertions, 96 deletions
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index d2422d1..4b0d0ff 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -219,17 +219,127 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
return true;
}
+static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
+ vaddr eaddr, uint64_t pid,
+ ppc_v3_pate_t pate, hwaddr *g_raddr,
+ int *g_prot, int *g_page_size,
+ bool cause_excp)
+{
+ CPUState *cs = CPU(cpu);
+ uint64_t offset, size, prtbe_addr, prtbe0, pte;
+ int fault_cause = 0;
+ hwaddr pte_addr;
+
+ /* Index Process Table by PID to Find Corresponding Process Table Entry */
+ offset = pid * sizeof(struct prtb_entry);
+ size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
+ if (offset >= size) {
+ /* offset exceeds size of the process table */
+ if (cause_excp) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
+ }
+ return 1;
+ }
+ prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
+ prtbe0 = ldq_phys(cs->as, prtbe_addr);
+
+ /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+ *g_page_size = PRTBE_R_GET_RTS(prtbe0);
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
+ prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
+ g_raddr, g_page_size, &fault_cause, &pte_addr);
+
+ if (!(pte & R_PTE_VALID) ||
+ ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot)) {
+ /* No valid pte or access denied due to protection */
+ if (cause_excp) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
+ }
+ return 1;
+ }
+
+ ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
+
+ return 0;
+}
+
+static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+ bool relocation,
+ hwaddr *raddr, int *psizep, int *protp,
+ bool cause_excp)
+{
+ uint64_t lpid = 0, pid = 0;
+ ppc_v3_pate_t pate;
+ int psize, prot;
+ hwaddr g_raddr;
+
+ /* Virtual Mode Access - get the fully qualified address */
+ if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
+ if (cause_excp) {
+ ppc_radix64_raise_segi(cpu, rwx, eaddr);
+ }
+ return 1;
+ }
+
+ /* Get Process Table */
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc;
+ vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->get_pate(cpu->vhyp, &pate);
+ } else {
+ if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
+ if (cause_excp) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
+ }
+ return 1;
+ }
+ if (!validate_pate(cpu, lpid, &pate)) {
+ if (cause_excp) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
+ }
+ return 1;
+ }
+ /* We don't support guest mode yet */
+ if (lpid != 0) {
+ error_report("PowerNV guest support Unimplemented");
+ exit(1);
+ }
+ }
+
+ *psizep = INT_MAX;
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ /*
+ * Perform process-scoped translation if relocation enabled.
+ *
+ * - Translates an effective address to a host real address in
+ * quadrants 0 and 3 when HV=1.
+ */
+ if (relocation) {
+ int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
+ pate, &g_raddr, &prot,
+ &psize, cause_excp);
+ if (ret) {
+ return ret;
+ }
+ *psizep = MIN(*psizep, psize);
+ *protp &= prot;
+ } else {
+ g_raddr = eaddr & R_EADDR_MASK;
+ }
+
+ *raddr = g_raddr;
+ return 0;
+}
+
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- PPCVirtualHypervisorClass *vhc;
- hwaddr raddr, pte_addr;
- uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
- int page_size, prot, fault_cause = 0;
- ppc_v3_pate_t pate;
+ int page_size, prot;
bool relocation;
+ hwaddr raddr;
assert(!(msr_hv && cpu->vhyp));
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
@@ -262,55 +372,12 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
}
- /* Virtual Mode Access - get the fully qualified address */
- if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
- ppc_radix64_raise_segi(cpu, rwx, eaddr);
+ /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
+ if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
+ &page_size, &prot, true)) {
return 1;
}
- /* Get Process Table */
- if (cpu->vhyp) {
- vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->get_pate(cpu->vhyp, &pate);
- } else {
- if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
- return 1;
- }
- if (!validate_pate(cpu, lpid, &pate)) {
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
- }
- /* We don't support guest mode yet */
- if (lpid != 0) {
- error_report("PowerNV guest support Unimplemented");
- exit(1);
- }
- }
-
- /* Index Process Table by PID to Find Corresponding Process Table Entry */
- offset = pid * sizeof(struct prtb_entry);
- size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
- if (offset >= size) {
- /* offset exceeds size of the process table */
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
- return 1;
- }
- prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
-
- /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
- page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
- prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &pte_addr);
- if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
- /* Couldn't get pte or access denied due to protection */
- ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
- return 1;
- }
-
- /* Update Reference and Change Bits */
- ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
-
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, 1UL << page_size);
return 0;
@@ -318,58 +385,18 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
- CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- PPCVirtualHypervisorClass *vhc;
- hwaddr raddr, pte_addr;
- uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
- int page_size, fault_cause = 0;
- ppc_v3_pate_t pate;
+ int psize, prot;
+ hwaddr raddr;
/* Handle Real Mode */
- if (msr_dr == 0) {
+ if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
/* In real mode top 4 effective addr bits (mostly) ignored */
return eaddr & 0x0FFFFFFFFFFFFFFFULL;
}
- /* Virtual Mode Access - get the fully qualified address */
- if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
- return -1;
- }
-
- /* Get Process Table */
- if (cpu->vhyp) {
- vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->get_pate(cpu->vhyp, &pate);
- } else {
- if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
- return -1;
- }
- if (!validate_pate(cpu, lpid, &pate)) {
- return -1;
- }
- /* We don't support guest mode yet */
- if (lpid != 0) {
- error_report("PowerNV guest support Unimplemented");
- exit(1);
- }
- }
-
- /* Index Process Table by PID to Find Corresponding Process Table Entry */
- offset = pid * sizeof(struct prtb_entry);
- size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
- if (offset >= size) {
- /* offset exceeds size of the process table */
- return -1;
- }
- prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
-
- /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
- page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
- prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &pte_addr);
- if (!pte) {
+ if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
+ &prot, false)) {
return -1;
}