author     Alexey Kardashevskiy <aik@ozlabs.ru>    2015-01-29 16:04:58 +1100
committer  Alexander Graf <agraf@suse.de>          2015-03-09 14:59:52 +0100
commit     ee9a569ab88edd0755402aaf31ec0c69decf7756 (patch)
tree       e7bf5a463ae6c5401a92241a16a8d728949032ca /hw
parent     0048fa6c807fc8fb5c52873562ea3debfa65f085 (diff)
spapr_vio/spapr_iommu: Move VIO bypass where it belongs
Instead of tweaking a TCE table device by adding a bypass flag to it,
let's add a RAM alias and an IOMMU memory region, and enable/disable
them according to the selected bypass mode.
This way the IOMMU memory region can have the size of the actual window
rather than ram_size, which is essential for the upcoming DDW support.
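(For orientation, the layering being described condenses to roughly the sketch below. It reuses the field and region names from the hunks further down (mrroot, mrbypass, "iommu-spapr-root", "iommu-spapr-bypass"), but the helper itself, vio_dma_window_sketch(), is only illustrative and is not a function added by this patch.)

/* Illustrative only: condensed from the spapr_vio.c hunks below.
 * A root container holds two overlapping subregions: a RAM alias
 * (the bypass path, priority 1) and the IOMMU region sized to the
 * TCE window (the translated path, priority 2). Bypass mode is then
 * selected by enabling one region and disabling the other.
 */
#include "exec/address-spaces.h"
#include "sysemu/sysemu.h"      /* ram_size */
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

static void vio_dma_window_sketch(VIOsPAPRDevice *dev, sPAPRTCETable *tcet)
{
    /* Container that the device's DMA address space is built on */
    memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
                       ram_size);

    /* Bypass path: a plain alias of system memory at priority 1 */
    memory_region_init_alias(&dev->mrbypass, OBJECT(dev), "iommu-spapr-bypass",
                             get_system_memory(), 0, ram_size);
    memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);

    /* Translated path: the IOMMU region no longer needs to pretend to be
     * ram_size bytes; it covers only nb_table << page_shift. Priority 2
     * means it wins over the alias whenever it is enabled. */
    memory_region_add_subregion_overlap(&dev->mrroot, 0,
                                        spapr_tce_get_iommu(tcet), 2);

    /* The patch uses qdev->id as the address space name */
    address_space_init(&dev->as, &dev->mrroot, DEVICE(dev)->id);
}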
This moves the bypass logic to the VIO layer and keeps the @bypass flag in
the TCE table for migration compatibility only. It replaces the
spapr_tce_set_bypass() calls with explicit assignments to avoid giving the
impression that the function does anything more than syncing the @bypass flag.
This adds a pointer to the VIO device to the sPAPRTCETable struct so that
the TCE table has a way to update the bypass mode of its VIO device.
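(Concretely, that pointer is what the new post_load hook uses after an incoming migration; the hook is reproduced from the spapr_iommu.c hunk below so the flow is visible next to the description.)

/* From the spapr_iommu.c hunk below: after migration the saved @bypass
 * flag is pushed back through tcet->vdev so the bypass and IOMMU memory
 * regions are re-enabled/disabled to match the source side.
 */
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    return 0;
}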
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'hw')
-rw-r--r--  hw/ppc/spapr_iommu.c  26
-rw-r--r--  hw/ppc/spapr_vio.c    28
2 files changed, 42 insertions, 12 deletions
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index ba003da..f3990fd 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 
 #include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
 
 #include <libfdt.h>
 
@@ -73,9 +74,7 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
         .perm = IOMMU_NONE,
     };
 
-    if (tcet->bypass) {
-        ret.perm = IOMMU_RW;
-    } else if ((addr >> tcet->page_shift) < tcet->nb_table) {
+    if ((addr >> tcet->page_shift) < tcet->nb_table) {
         /* Check if we are in bound */
         hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
 
@@ -91,10 +90,22 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
     return ret;
 }
 
+static int spapr_tce_table_post_load(void *opaque, int version_id)
+{
+    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
+
+    if (tcet->vdev) {
+        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
+    }
+
+    return 0;
+}
+
 static const VMStateDescription vmstate_spapr_tce_table = {
     .name = "spapr_iommu",
     .version_id = 2,
     .minimum_version_id = 2,
+    .post_load = spapr_tce_table_post_load,
     .fields      = (VMStateField []) {
         /* Sanity check */
         VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
@@ -132,7 +143,8 @@ static int spapr_tce_table_realize(DeviceState *dev)
     trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);
 
     memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
-                             "iommu-spapr", ram_size);
+                             "iommu-spapr",
+                             (uint64_t)tcet->nb_table << tcet->page_shift);
 
     QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
 
@@ -192,17 +204,11 @@ MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
     return &tcet->iommu;
 }
 
-void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
-{
-    tcet->bypass = bypass;
-}
-
 static void spapr_tce_reset(DeviceState *dev)
 {
     sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
     size_t table_size = tcet->nb_table * sizeof(uint64_t);
 
-    tcet->bypass = false;
     memset(tcet->table, 0, table_size);
 }
 
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 032ee1a..245cdd7 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -322,6 +322,18 @@ static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev)
     free_crq(dev);
 }
 
+void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass)
+{
+    if (!dev->tcet) {
+        return;
+    }
+
+    memory_region_set_enabled(&dev->mrbypass, bypass);
+    memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass);
+
+    dev->tcet->bypass = bypass;
+}
+
 static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                 uint32_t token,
                                 uint32_t nargs, target_ulong args,
@@ -348,7 +360,7 @@ static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr,
         return;
     }
 
-    spapr_tce_set_bypass(dev->tcet, !!enable);
+    spapr_vio_set_bypass(dev, !!enable);
 
     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 }
@@ -407,6 +419,7 @@ static void spapr_vio_busdev_reset(DeviceState *qdev)
 
     dev->signal_state = 0;
 
+    spapr_vio_set_bypass(dev, false);
     if (pc->reset) {
         pc->reset(dev);
     }
@@ -456,12 +469,23 @@ static int spapr_vio_busdev_init(DeviceState *qdev)
 
     if (pc->rtce_window_size) {
         uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
+
+        memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
+                           ram_size);
+        memory_region_init_alias(&dev->mrbypass, OBJECT(dev),
+                                 "iommu-spapr-bypass", get_system_memory(),
+                                 0, ram_size);
+        memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
+        address_space_init(&dev->as, &dev->mrroot, qdev->id);
+
         dev->tcet = spapr_tce_new_table(qdev, liobn, 0, SPAPR_TCE_PAGE_SHIFT,
                                         pc->rtce_window_size >>
                                         SPAPR_TCE_PAGE_SHIFT, false);
-        address_space_init(&dev->as, spapr_tce_get_iommu(dev->tcet), qdev->id);
+        dev->tcet->vdev = dev;
+        memory_region_add_subregion_overlap(&dev->mrroot, 0,
+                                            spapr_tce_get_iommu(dev->tcet), 2);
     }
 
     return pc->init(dev);