author     Thomas Huth <thuth@redhat.com>              2016-02-18 10:15:54 +0100
committer  David Gibson <david@gibson.dropbear.id.au>  2016-02-25 13:58:44 +1100
commit     3240dd9a6924df18dfccb83defa0914065da076e (patch)
tree       3932ed825d17abbe9bf0eaddbccf4d887daa4520
parent     4f7ab0cdbccfc1d0a3a6d0a7c9a22d8a90d9e2f0 (diff)
hw/ppc/spapr: Implement the h_page_init hypercall
This hypercall either initializes a page with zeros, or copies the contents of another page. According to LoPAPR, the i-cache of the page should also be flushed when H_ICACHE_INVALIDATE or H_ICACHE_SYNCHRONIZE is used, and the d-cache should be synchronized to RAM when the H_ICACHE_SYNCHRONIZE flag is used. For this, two new functions are introduced, kvmppc_dcbst_range() and kvmppc_icbi_range(), which use the corresponding assembler instructions to flush the caches when running with KVM on Power. If the code runs with TCG instead, it only uses tb_flush(), assuming that this is enough for synchronization.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
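For context, a guest-side sketch of how this hypercall could be exercised is shown below. It is not part of the patch: it assumes a Linux pseries guest with the plpar_hcall_norets() wrapper and the H_PAGE_INIT flag constants from <asm/hvcall.h>, and the two helper names are made up for illustration. Both addresses must be page-aligned guest RAM addresses, otherwise the handler returns H_PARAMETER.

/* Guest-side illustration only -- not part of this patch.
 * Assumed environment: Linux pseries guest providing plpar_hcall_norets()
 * and the H_PAGE_INIT, H_ZERO_PAGE, H_COPY_PAGE and H_ICACHE_* constants
 * via <asm/hvcall.h>. The helper names below are hypothetical.
 */
#include <asm/hvcall.h>

/* Zero one page of guest real memory and invalidate the i-cache for it,
 * e.g. before loading fresh code into that page.
 */
static long example_zero_exec_page(unsigned long dst_ra)
{
    return plpar_hcall_norets(H_PAGE_INIT,
                              H_ZERO_PAGE | H_ICACHE_INVALIDATE,
                              dst_ra, 0 /* source is ignored for H_ZERO_PAGE */);
}

/* Copy one page and synchronize the d-cache and i-cache so the
 * destination page can be executed from right away.
 */
static long example_copy_exec_page(unsigned long dst_ra, unsigned long src_ra)
{
    return plpar_hcall_norets(H_PAGE_INIT,
                              H_COPY_PAGE | H_ICACHE_SYNCHRONIZE,
                              dst_ra, src_ra);
}

On success the hypercall returns H_SUCCESS; bad flags, unaligned addresses or non-RAM addresses yield H_PARAMETER, matching the checks in h_page_init() in the diff below.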
-rw-r--r--  hw/ppc/spapr_hcall.c  60
-rw-r--r--  target-ppc/kvm_ppc.h  36
2 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 6e9b6be..1733482 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -386,6 +386,65 @@ static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
    return H_SUCCESS;
}
+static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+                                target_ulong opcode, target_ulong *args)
+{
+    target_ulong flags = args[0];
+    hwaddr dst = args[1];
+    hwaddr src = args[2];
+    hwaddr len = TARGET_PAGE_SIZE;
+    uint8_t *pdst, *psrc;
+    target_long ret = H_SUCCESS;
+
+    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
+                  | H_COPY_PAGE | H_ZERO_PAGE)) {
+        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
+                      flags);
+        return H_PARAMETER;
+    }
+
+    /* Map-in destination */
+    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
+        return H_PARAMETER;
+    }
+    pdst = cpu_physical_memory_map(dst, &len, 1);
+    if (!pdst || len != TARGET_PAGE_SIZE) {
+        return H_PARAMETER;
+    }
+
+    if (flags & H_COPY_PAGE) {
+        /* Map-in source, copy to destination, and unmap source again */
+        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        psrc = cpu_physical_memory_map(src, &len, 0);
+        if (!psrc || len != TARGET_PAGE_SIZE) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        memcpy(pdst, psrc, len);
+        cpu_physical_memory_unmap(psrc, len, 0, len);
+    } else if (flags & H_ZERO_PAGE) {
+        memset(pdst, 0, len); /* Just clear the destination page */
+    }
+
+    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
+        kvmppc_dcbst_range(cpu, pdst, len);
+    }
+    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
+        if (kvm_enabled()) {
+            kvmppc_icbi_range(cpu, pdst, len);
+        } else {
+            tb_flush(CPU(cpu));
+        }
+    }
+
+unmap_out:
+    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
+    return ret;
+}
+
#define FLAGS_REGISTER_VPA 0x0000200000000000ULL
#define FLAGS_REGISTER_DTL 0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW 0x0000600000000000ULL
@@ -1045,6 +1104,7 @@ static void hypercall_register_types(void)
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
+    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);
    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index aaa828c..fd64c44 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -249,15 +249,47 @@ static inline int kvmppc_enable_hwrng(void)
#endif
#ifndef CONFIG_KVM
+
#define kvmppc_eieio() do { } while (0)
-#else
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else /* CONFIG_KVM */
+
#define kvmppc_eieio() \
    do { \
        if (kvm_enabled()) { \
            asm volatile("eieio" : : : "memory"); \
        } \
    } while (0)
-#endif
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+    }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+        asm volatile("icbi 0,%0" : : "r"(p));
+    }
+}
+
+#endif /* CONFIG_KVM */
#ifndef KVM_INTERRUPT_SET
#define KVM_INTERRUPT_SET -1