 hw/ide/macio.c              |   3
 hw/misc/macio/mac_dbdma.c   |  13
 hw/ppc/spapr.c              |  36
 hw/ppc/spapr_iommu.c        | 140
 hw/ppc/spapr_pci.c          |  28
 hw/ppc/spapr_vio.c          |   8
 include/hw/pci-host/spapr.h |   2
 include/hw/ppc/spapr.h      |  14
 include/migration/vmstate.h |  10
 include/sysemu/kvm.h        |   1
 kvm-all.c                   |   7
 target-ppc/cpu.h            |   6
 target-ppc/excp_helper.c    |   8
 target-ppc/fpu_helper.c     |   2
 target-ppc/helper.h         |   1
 target-ppc/helper_regs.h    |   8
 target-ppc/mmu-hash64.c     |  30
 target-ppc/mmu_helper.c     |  74
 target-ppc/translate.c      |  83
 target-ppc/translate_init.c |  39
 trace-events                |   2
 21 files changed, 388 insertions(+), 127 deletions(-)
diff --git a/hw/ide/macio.c b/hw/ide/macio.c index 42ad68a..78c10a0 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -271,7 +271,8 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) if (s->lba == -1) { /* Non-block ATAPI transfer - just copy to RAM */ s->io_buffer_size = MIN(s->io_buffer_size, io->len); - cpu_physical_memory_write(io->addr, s->io_buffer, s->io_buffer_size); + dma_memory_write(&address_space_memory, io->addr, s->io_buffer, + s->io_buffer_size); ide_atapi_cmd_ok(s); m->dma_active = false; goto done; diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 5632743..f116f9c 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -42,6 +42,7 @@ #include "hw/ppc/mac_dbdma.h" #include "qemu/main-loop.h" #include "qemu/log.h" +#include "sysemu/dma.h" /* debug DBDMA */ //#define DEBUG_DBDMA @@ -81,8 +82,8 @@ static void dbdma_cmdptr_load(DBDMA_channel *ch) { DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n", ch->regs[DBDMA_CMDPTR_LO]); - cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], + &ch->current, sizeof(dbdma_cmd)); } static void dbdma_cmdptr_save(DBDMA_channel *ch) @@ -92,8 +93,8 @@ static void dbdma_cmdptr_save(DBDMA_channel *ch) DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n", le16_to_cpu(ch->current.xfer_status), le16_to_cpu(ch->current.res_count)); - cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], + &ch->current, sizeof(dbdma_cmd)); } static void kill_channel(DBDMA_channel *ch) @@ -353,7 +354,7 @@ static void load_word(DBDMA_channel *ch, int key, uint32_t addr, return; } - cpu_physical_memory_read(addr, &val, len); + dma_memory_read(&address_space_memory, addr, &val, len); if (len == 2) val = (val << 16) | (current->cmd_dep & 0x0000ffff); @@ -398,7 +399,7 @@ static void store_word(DBDMA_channel *ch, int key, uint32_t addr, else if (len == 1) val >>= 24; - cpu_physical_memory_write(addr, &val, len); + dma_memory_write(&address_space_memory, addr, &val, len); if (conditional_wait(ch)) goto wait; diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 44e401a..0636642 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1816,11 +1816,21 @@ static void ppc_spapr_init(MachineState *machine) /* initialize hotplug memory address space */ if (machine->ram_size < machine->maxram_size) { ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size; + /* + * Limit the number of hotpluggable memory slots to half the number + * slots that KVM supports, leaving the other half for PCI and other + * devices. However ensure that number of slots doesn't drop below 32. + */ + int max_memslots = kvm_enabled() ? 
kvm_get_max_memslots() / 2 : + SPAPR_MAX_RAM_SLOTS; - if (machine->ram_slots > SPAPR_MAX_RAM_SLOTS) { + if (max_memslots < SPAPR_MAX_RAM_SLOTS) { + max_memslots = SPAPR_MAX_RAM_SLOTS; + } + if (machine->ram_slots > max_memslots) { error_report("Specified number of memory slots %" PRIu64" exceeds max supported %d", - machine->ram_slots, SPAPR_MAX_RAM_SLOTS); + machine->ram_slots, max_memslots); exit(1); } @@ -2344,18 +2354,36 @@ static const TypeInfo spapr_machine_info = { type_init(spapr_machine_register_##suffix) /* + * pseries-2.7 + */ +static void spapr_machine_2_7_instance_options(MachineState *machine) +{ +} + +static void spapr_machine_2_7_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(2_7, "2.7", true); + +/* * pseries-2.6 */ +#define SPAPR_COMPAT_2_6 \ + HW_COMPAT_2_6 + static void spapr_machine_2_6_instance_options(MachineState *machine) { } static void spapr_machine_2_6_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_2_7_class_options(mc); + SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6); } -DEFINE_SPAPR_MACHINE(2_6, "2.6", true); +DEFINE_SPAPR_MACHINE(2_6, "2.6", false); /* * pseries-2.5 diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 96bb018..a3cc572 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -17,6 +17,7 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "hw/hw.h" #include "qemu/log.h" #include "sysemu/kvm.h" @@ -137,33 +138,92 @@ static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr, return ret; } +static void spapr_tce_table_pre_save(void *opaque) +{ + sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque); + + tcet->mig_table = tcet->table; + tcet->mig_nb_table = tcet->nb_table; + + trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table, + tcet->bus_offset, tcet->page_shift); +} + static int spapr_tce_table_post_load(void *opaque, int version_id) { sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque); + uint32_t old_nb_table = tcet->nb_table; + uint64_t old_bus_offset = tcet->bus_offset; + uint32_t old_page_shift = tcet->page_shift; if (tcet->vdev) { spapr_vio_set_bypass(tcet->vdev, tcet->bypass); } + if (tcet->mig_nb_table != tcet->nb_table) { + spapr_tce_table_disable(tcet); + } + + if (tcet->mig_nb_table) { + if (!tcet->nb_table) { + spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset, + tcet->mig_nb_table); + } + + memcpy(tcet->table, tcet->mig_table, + tcet->nb_table * sizeof(tcet->table[0])); + + free(tcet->mig_table); + tcet->mig_table = NULL; + } + + trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table, + tcet->bus_offset, tcet->page_shift); + return 0; } +static bool spapr_tce_table_ex_needed(void *opaque) +{ + sPAPRTCETable *tcet = opaque; + + return tcet->bus_offset || tcet->page_shift != 0xC; +} + +static const VMStateDescription vmstate_spapr_tce_table_ex = { + .name = "spapr_iommu_ex", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_tce_table_ex_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(bus_offset, sPAPRTCETable), + VMSTATE_UINT32(page_shift, sPAPRTCETable), + VMSTATE_END_OF_LIST() + }, +}; + static const VMStateDescription vmstate_spapr_tce_table = { .name = "spapr_iommu", .version_id = 2, .minimum_version_id = 2, + .pre_save = spapr_tce_table_pre_save, .post_load = spapr_tce_table_post_load, .fields = 
(VMStateField []) { /* Sanity check */ VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable), - VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable), /* IOMMU state */ + VMSTATE_UINT32(mig_nb_table, sPAPRTCETable), VMSTATE_BOOL(bypass, sPAPRTCETable), - VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0, vmstate_info_uint64, uint64_t), + VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0, + vmstate_info_uint64, uint64_t), VMSTATE_END_OF_LIST() }, + .subsections = (const VMStateDescription*[]) { + &vmstate_spapr_tce_table_ex, + NULL + } }; static MemoryRegionIOMMUOps spapr_iommu_ops = { @@ -173,17 +233,16 @@ static MemoryRegionIOMMUOps spapr_iommu_ops = { static int spapr_tce_table_realize(DeviceState *dev) { sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev); + Object *tcetobj = OBJECT(tcet); + char tmp[32]; tcet->fd = -1; - tcet->table = spapr_tce_alloc_table(tcet->liobn, - tcet->page_shift, - tcet->nb_table, - &tcet->fd, - tcet->need_vfio); + tcet->need_vfio = false; + snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn); + memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX); - memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops, - "iommu-spapr", - (uint64_t)tcet->nb_table << tcet->page_shift); + snprintf(tmp, sizeof(tmp), "tce-iommu-%x", tcet->liobn); + memory_region_init_iommu(&tcet->iommu, tcetobj, &spapr_iommu_ops, tmp, 0); QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list); @@ -225,14 +284,10 @@ void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio) tcet->table = newtable; } -sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, - uint64_t bus_offset, - uint32_t page_shift, - uint32_t nb_table, - bool need_vfio) +sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn) { sPAPRTCETable *tcet; - char tmp[64]; + char tmp[32]; if (spapr_tce_find_by_liobn(liobn)) { fprintf(stderr, "Attempted to create TCE table with duplicate" @@ -240,16 +295,8 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, return NULL; } - if (!nb_table) { - return NULL; - } - tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE)); tcet->liobn = liobn; - tcet->bus_offset = bus_offset; - tcet->page_shift = page_shift; - tcet->nb_table = nb_table; - tcet->need_vfio = need_vfio; snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn); object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL); @@ -259,19 +306,58 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, return tcet; } +void spapr_tce_table_enable(sPAPRTCETable *tcet, + uint32_t page_shift, uint64_t bus_offset, + uint32_t nb_table) +{ + if (tcet->nb_table) { + error_report("Warning: trying to enable already enabled TCE table"); + return; + } + + tcet->bus_offset = bus_offset; + tcet->page_shift = page_shift; + tcet->nb_table = nb_table; + tcet->table = spapr_tce_alloc_table(tcet->liobn, + tcet->page_shift, + tcet->nb_table, + &tcet->fd, + tcet->need_vfio); + + memory_region_set_size(&tcet->iommu, + (uint64_t)tcet->nb_table << tcet->page_shift); + memory_region_add_subregion(&tcet->root, tcet->bus_offset, &tcet->iommu); +} + +void spapr_tce_table_disable(sPAPRTCETable *tcet) +{ + if (!tcet->nb_table) { + return; + } + + memory_region_del_subregion(&tcet->root, &tcet->iommu); + memory_region_set_size(&tcet->iommu, 0); + + spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table); + tcet->fd = -1; + tcet->table = NULL; + tcet->bus_offset = 0; + tcet->page_shift = 0; + tcet->nb_table = 0; +} + static void spapr_tce_table_unrealize(DeviceState *dev, 
Error **errp) { sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev); QLIST_REMOVE(tcet, list); - spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table); - tcet->fd = -1; + spapr_tce_table_disable(tcet); } MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet) { - return &tcet->iommu; + return &tcet->root; } static void spapr_tce_reset(DeviceState *dev) diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 856aec7..9f28fb3 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1310,7 +1310,6 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) PCIBus *bus; uint64_t msi_window_size = 4096; sPAPRTCETable *tcet; - uint32_t nb_table; if (sphb->index != (uint32_t)-1) { hwaddr windows_base; @@ -1462,18 +1461,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) } } - nb_table = sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT; - tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn, - 0, SPAPR_TCE_PAGE_SHIFT, nb_table, false); + tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn); if (!tcet) { error_setg(errp, "Unable to create TCE table for %s", sphb->dtbusname); return; } - /* Register default 32bit DMA window */ - memory_region_add_subregion(&sphb->iommu_root, sphb->dma_win_addr, - spapr_tce_get_iommu(tcet)); + memory_region_add_subregion_overlap(&sphb->iommu_root, 0, + spapr_tce_get_iommu(tcet), 0); sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); } @@ -1489,8 +1485,25 @@ static int spapr_phb_children_reset(Object *child, void *opaque) return 0; } +void spapr_phb_dma_reset(sPAPRPHBState *sphb) +{ + sPAPRTCETable *tcet = spapr_tce_find_by_liobn(sphb->dma_liobn); + + if (tcet && tcet->nb_table) { + spapr_tce_table_disable(tcet); + } + + /* Register default 32bit DMA window */ + spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, + sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); +} + static void spapr_phb_reset(DeviceState *qdev) { + sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev); + + spapr_phb_dma_reset(sphb); + /* Reset the IOMMU state */ object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL); @@ -1624,7 +1637,6 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data) dc->reset = spapr_phb_reset; dc->vmsd = &vmstate_spapr_pci; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - dc->cannot_instantiate_with_device_add_yet = false; hp->plug = spapr_phb_hot_plug_child; hp->unplug = spapr_phb_hot_unplug_child; } diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index d084aed..3d9b9c6 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -483,11 +483,9 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1); address_space_init(&dev->as, &dev->mrroot, qdev->id); - dev->tcet = spapr_tce_new_table(qdev, liobn, - 0, - SPAPR_TCE_PAGE_SHIFT, - pc->rtce_window_size >> - SPAPR_TCE_PAGE_SHIFT, false); + dev->tcet = spapr_tce_new_table(qdev, liobn); + spapr_tce_table_enable(dev->tcet, SPAPR_TCE_PAGE_SHIFT, 0, + pc->rtce_window_size >> SPAPR_TCE_PAGE_SHIFT); dev->tcet->vdev = dev; memory_region_add_subregion_overlap(&dev->mrroot, 0, spapr_tce_get_iommu(dev->tcet), 2); diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h index 03ee006..7848366 100644 --- a/include/hw/pci-host/spapr.h +++ b/include/hw/pci-host/spapr.h @@ -147,4 +147,6 @@ static inline void spapr_phb_vfio_reset(DeviceState *qdev) } #endif +void spapr_phb_dma_reset(sPAPRPHBState *sphb); + #endif /* __HW_SPAPR_PCI_H__ */ diff --git 
a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 815d5ee..971df3d 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -539,10 +539,12 @@ struct sPAPRTCETable { uint64_t bus_offset; uint32_t page_shift; uint64_t *table; + uint32_t mig_nb_table; + uint64_t *mig_table; bool bypass; bool need_vfio; int fd; - MemoryRegion iommu; + MemoryRegion root, iommu; struct VIOsPAPRDevice *vdev; /* for @bypass migration compatibility only */ QLIST_ENTRY(sPAPRTCETable) list; }; @@ -561,11 +563,11 @@ void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq); int spapr_h_cas_compose_response(sPAPRMachineState *sm, target_ulong addr, target_ulong size, bool cpu_update, bool memory_update); -sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, - uint64_t bus_offset, - uint32_t page_shift, - uint32_t nb_table, - bool need_vfio); +sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn); +void spapr_tce_table_enable(sPAPRTCETable *tcet, + uint32_t page_shift, uint64_t bus_offset, + uint32_t nb_table); +void spapr_tce_table_disable(sPAPRTCETable *tcet); void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio); MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet); diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 30ecc44..6c65811 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -386,6 +386,16 @@ extern const VMStateInfo vmstate_info_bitmap; .offset = vmstate_offset_pointer(_state, _field, _type), \ } +#define VMSTATE_VARRAY_UINT32_ALLOC(_field, _state, _field_num, _version, _info, _type) {\ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint32_t),\ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_UINT32|VMS_POINTER|VMS_ALLOC, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + #define VMSTATE_VARRAY_UINT16_UNSAFE(_field, _state, _field_num, _version, _info, _type) {\ .name = (stringify(_field)), \ .version_id = (_version), \ diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 65569ed..ad6f837 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -527,4 +527,5 @@ int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source); * Returns: 0 on success, or a negative errno on failure. 
*/ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); +int kvm_get_max_memslots(void); #endif @@ -126,6 +126,13 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = { KVM_CAP_LAST_INFO }; +int kvm_get_max_memslots(void) +{ + KVMState *s = KVM_STATE(current_machine->accelerator); + + return s->nr_slots; +} + static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) { KVMState *s = kvm_state; diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 98a24a5..c2962d7 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -959,7 +959,6 @@ struct CPUPPCState { ppc_slb_t slb[MAX_SLB_ENTRIES]; int32_t slb_nr; /* tcg TLB needs flush (deferred slb inval instruction typically) */ - uint32_t tlb_need_flush; #endif /* segment registers */ hwaddr htab_base; @@ -985,6 +984,7 @@ struct CPUPPCState { target_ulong pb[4]; bool tlb_dirty; /* Set to non-zero when modifying TLB */ bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */ + uint32_t tlb_need_flush; /* Delayed flush needed */ #endif /* Other registers */ @@ -1050,6 +1050,10 @@ struct CPUPPCState { hwaddr mpic_iack; /* true when the external proxy facility mode is enabled */ bool mpic_proxy; + /* set when the processor has an HV mode, thus HV priv + * instructions and SPRs are diallowed if MSR:HV is 0 + */ + bool has_hv_mode; #endif /* Those resources are used only during code translation */ diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c index a37009e..30e960e 100644 --- a/target-ppc/excp_helper.c +++ b/target-ppc/excp_helper.c @@ -709,8 +709,12 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } } #endif - /* XXX: we don't use hreg_store_msr here as already have treated - * any special case that could occur. Just store MSR and update hflags + /* We don't use hreg_store_msr here as already have treated + * any special case that could occur. Just store MSR and update hflags + * + * Note: We *MUST* not use hreg_store_msr() as-is anyway because it + * will prevent setting of the HV bit which some exceptions might need + * to do. 
*/ env->msr = new_msr & env->msr_mask; hreg_compute_hflags(env); diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c index b67ebca..6fd56a8 100644 --- a/target-ppc/fpu_helper.c +++ b/target-ppc/fpu_helper.c @@ -1442,7 +1442,7 @@ static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2) #define HELPER_SINGLE_SPE_CMP(name) \ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \ { \ - return e##name(env, op1, op2) << 2; \ + return e##name(env, op1, op2); \ } /* efststlt */ HELPER_SINGLE_SPE_CMP(fststlt); diff --git a/target-ppc/helper.h b/target-ppc/helper.h index 0526322..f4410a8 100644 --- a/target-ppc/helper.h +++ b/target-ppc/helper.h @@ -550,6 +550,7 @@ DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_2(load_slb_esid, tl, env, tl) DEF_HELPER_2(load_slb_vsid, tl, env, tl) +DEF_HELPER_2(find_slb_vsid, tl, env, tl) DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl) #endif diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h index 57da931..8fc0934 100644 --- a/target-ppc/helper_regs.h +++ b/target-ppc/helper_regs.h @@ -95,7 +95,7 @@ static inline void hreg_compute_hflags(CPUPPCState *env) /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */ hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) | (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) | - (1 << MSR_LE) | (1 << MSR_VSX); + (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR); hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB; hreg_compute_mem_idx(env); env->hflags = env->msr & hflags_mask; @@ -114,8 +114,8 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value, excp = 0; value &= env->msr_mask; #if !defined(CONFIG_USER_ONLY) - if (!alter_hv) { - /* mtmsr cannot alter the hypervisor state */ + /* Neither mtmsr nor guest state can alter HV */ + if (!alter_hv || !(env->msr & MSR_HVB)) { value &= ~MSR_HVB; value |= env->msr & MSR_HVB; } @@ -151,7 +151,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value, return excp; } -#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) +#if !defined(CONFIG_USER_ONLY) static inline void check_tlb_flush(CPUPPCState *env) { CPUState *cs = CPU(ppc_env_get_cpu(env)); diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c index ea6e99a..668da5e 100644 --- a/target-ppc/mmu-hash64.c +++ b/target-ppc/mmu-hash64.c @@ -219,6 +219,24 @@ static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb, return 0; } +static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb, + target_ulong *rt) +{ + CPUPPCState *env = &cpu->env; + ppc_slb_t *slb; + + if (!msr_is_64bit(env, env->msr)) { + rb &= 0xffffffff; + } + slb = slb_lookup(cpu, rb); + if (slb == NULL) { + *rt = (target_ulong)-1ul; + } else { + *rt = slb->vsid; + } + return 0; +} + void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) { PowerPCCPU *cpu = ppc_env_get_cpu(env); @@ -241,6 +259,18 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) return rt; } +target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) +{ + PowerPCCPU *cpu = ppc_env_get_cpu(env); + target_ulong rt = 0; + + if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { + helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL); + } + return rt; +} + target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) { 
PowerPCCPU *cpu = ppc_env_get_cpu(env); diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c index 1499af72..485d5b8 100644 --- a/target-ppc/mmu_helper.c +++ b/target-ppc/mmu_helper.c @@ -512,18 +512,20 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); #if defined(DUMP_PAGE_TABLES) - if (qemu_log_mask(CPU_LOG_MMU)) { + if (qemu_loglevel_mask(CPU_LOG_MMU)) { + CPUState *cs = ENV_GET_CPU(env); hwaddr curaddr; uint32_t a0, a1, a2, a3; qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx - "\n", sdr, mask + 0x80); - for (curaddr = sdr; curaddr < (sdr + mask + 0x80); + "\n", env->htab_base, env->htab_mask + 0x80); + for (curaddr = env->htab_base; + curaddr < (env->htab_base + env->htab_mask + 0x80); curaddr += 16) { - a0 = ldl_phys(curaddr); - a1 = ldl_phys(curaddr + 4); - a2 = ldl_phys(curaddr + 8); - a3 = ldl_phys(curaddr + 12); + a0 = ldl_phys(cs->as, curaddr); + a1 = ldl_phys(cs->as, curaddr + 4); + a2 = ldl_phys(cs->as, curaddr + 8); + a3 = ldl_phys(cs->as, curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", curaddr, a0, a1, a2, a3); @@ -894,9 +896,9 @@ static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" - PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n", - __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3, - tlb->mas8); + PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" + PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask, + tlb->mas7_3, tlb->mas8); /* Check PID */ tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; @@ -1746,6 +1748,9 @@ static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = ppc_env_get_cpu(env); +#endif dump_store_bat(env, 'I', 0, nr, value); if (env->IBAT[0][nr] != value) { @@ -1764,7 +1769,7 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->IBAT[0][nr], mask); #else - tlb_flush(env, 1); + tlb_flush(CPU(cpu), 1); #endif } } @@ -1778,6 +1783,9 @@ void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = ppc_env_get_cpu(env); +#endif dump_store_bat(env, 'D', 0, nr, value); if (env->DBAT[0][nr] != value) { @@ -1796,7 +1804,7 @@ void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) #if !defined(FLUSH_ALL_TLBS) do_invalidate_BAT(env, env->DBAT[0][nr], mask); #else - tlb_flush(env, 1); + tlb_flush(CPU(cpu), 1); #endif } } @@ -1811,6 +1819,7 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) { target_ulong mask; #if defined(FLUSH_ALL_TLBS) + PowerPCCPU *cpu = ppc_env_get_cpu(env); int do_inval; #endif @@ -1843,7 +1852,7 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value) } #if defined(FLUSH_ALL_TLBS) if (do_inval) { - tlb_flush(env, 1); + tlb_flush(CPU(cpu), 1); } #endif } @@ -1854,6 +1863,7 @@ void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) #if !defined(FLUSH_ALL_TLBS) target_ulong 
mask; #else + PowerPCCPU *cpu = ppc_env_get_cpu(env); int do_inval; #endif @@ -1882,7 +1892,7 @@ void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value) env->DBAT[1][nr] = value; #if defined(FLUSH_ALL_TLBS) if (do_inval) { - tlb_flush(env, 1); + tlb_flush(CPU(cpu), 1); } #endif } @@ -1925,8 +1935,8 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) case POWERPC_MMU_2_06a: case POWERPC_MMU_2_07: case POWERPC_MMU_2_07a: - env->tlb_need_flush = 0; #endif /* defined(TARGET_PPC64) */ + env->tlb_need_flush = 0; tlb_flush(CPU(cpu), 1); break; default: @@ -1939,9 +1949,6 @@ void ppc_tlb_invalidate_all(CPUPPCState *env) void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) - PowerPCCPU *cpu = ppc_env_get_cpu(env); - CPUState *cs; - addr &= TARGET_PAGE_MASK; switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: @@ -1953,28 +1960,12 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) break; case POWERPC_MMU_32B: case POWERPC_MMU_601: - /* tlbie invalidate TLBs for all segments */ - addr &= ~((target_ulong)-1ULL << 28); - cs = CPU(cpu); - /* XXX: this case should be optimized, - * giving a mask to tlb_flush_page + /* Actual CPUs invalidate entire congruence classes based on the + * geometry of their TLBs and some OSes take that into account, + * we just mark the TLB to be flushed later (context synchronizing + * event or sync instruction on 32-bit). */ - tlb_flush_page(cs, addr | (0x0 << 28)); - tlb_flush_page(cs, addr | (0x1 << 28)); - tlb_flush_page(cs, addr | (0x2 << 28)); - tlb_flush_page(cs, addr | (0x3 << 28)); - tlb_flush_page(cs, addr | (0x4 << 28)); - tlb_flush_page(cs, addr | (0x5 << 28)); - tlb_flush_page(cs, addr | (0x6 << 28)); - tlb_flush_page(cs, addr | (0x7 << 28)); - tlb_flush_page(cs, addr | (0x8 << 28)); - tlb_flush_page(cs, addr | (0x9 << 28)); - tlb_flush_page(cs, addr | (0xA << 28)); - tlb_flush_page(cs, addr | (0xB << 28)); - tlb_flush_page(cs, addr | (0xC << 28)); - tlb_flush_page(cs, addr | (0xD << 28)); - tlb_flush_page(cs, addr | (0xE << 28)); - tlb_flush_page(cs, addr | (0xF << 28)); + env->tlb_need_flush = 1; break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: @@ -2040,13 +2031,12 @@ target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num) void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value) { - PowerPCCPU *cpu = ppc_env_get_cpu(env); - qemu_log_mask(CPU_LOG_MMU, "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, (int)srnum, value, env->sr[srnum]); #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { + PowerPCCPU *cpu = ppc_env_get_cpu(env); uint64_t esid, vsid; /* ESID = srnum */ @@ -2075,7 +2065,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value) } } #else - tlb_flush(CPU(cpu), 1); + env->tlb_need_flush = 1; #endif } } diff --git a/target-ppc/translate.c b/target-ppc/translate.c index 123e42f..b689475 100644 --- a/target-ppc/translate.c +++ b/target-ppc/translate.c @@ -193,6 +193,7 @@ struct DisasContext { uint32_t exception; /* Routine used to access memory */ bool pr, hv; + bool lazy_tlb_flush; int mem_idx; int access_type; /* Translation flags */ @@ -3046,10 +3047,13 @@ static void gen_std(DisasContext *ctx) rs = rS(ctx->opcode); if ((ctx->opcode & 0x3) == 0x2) { /* stq */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; + if (!(ctx->insns_flags & PPC_64BX)) { + gen_inval_exception(ctx, 
POWERPC_EXCP_INVAL_INVAL); + } + if (!legal_in_user_mode && ctx->pr) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; @@ -3290,12 +3294,17 @@ static void gen_eieio(DisasContext *ctx) { } -#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64) +#if !defined(CONFIG_USER_ONLY) static inline void gen_check_tlb_flush(DisasContext *ctx) { - TCGv_i32 t = tcg_temp_new_i32(); - TCGLabel *l = gen_new_label(); + TCGv_i32 t; + TCGLabel *l; + if (!ctx->lazy_tlb_flush) { + return; + } + l = gen_new_label(); + t = tcg_temp_new_i32(); tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l); gen_helper_check_tlb_flush(cpu_env); @@ -3475,10 +3484,14 @@ static void gen_sync(DisasContext *ctx) uint32_t l = (ctx->opcode >> 21) & 3; /* - * For l == 2, it's a ptesync, We need to check for a pending TLB flush. - * This can only happen in kernel mode however so check MSR_PR as well. + * We may need to check for a pending TLB flush. + * + * We do this on ptesync (l == 2) on ppc64 and any sync pn ppc32. + * + * Additionally, this can only happen in kernel mode however so + * check MSR_PR as well. */ - if (l == 2 && !ctx->pr) { + if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { gen_check_tlb_flush(ctx); } } @@ -4108,7 +4121,7 @@ static void gen_hrfid(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else /* Restore CPU state */ - if (unlikely(!ctx->hv)) { + if (unlikely(ctx->pr || !ctx->hv)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } @@ -4338,7 +4351,10 @@ static inline void gen_op_mfspr(DisasContext *ctx) qemu_log("Trying to read invalid spr %d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); } - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); + /* Only generate an exception in user space, otherwise this is a nop */ + if (ctx->pr) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); + } } } @@ -4490,7 +4506,11 @@ static void gen_mtspr(DisasContext *ctx) } fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); + + /* Only generate an exception in user space, otherwise this is a nop */ + if (ctx->pr) { + gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR); + } } } @@ -4834,6 +4854,31 @@ static void gen_slbmfev(DisasContext *ctx) cpu_gpr[rB(ctx->opcode)]); #endif } + +static void gen_slbfee_(DisasContext *ctx) +{ +#if defined(CONFIG_USER_ONLY) + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); +#else + TCGLabel *l1, *l2; + + if (unlikely(ctx->pr)) { + gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); + return; + } + gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env, + cpu_gpr[rB(ctx->opcode)]); + l1 = gen_new_label(); + l2 = gen_new_label(); + tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1); + tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0); + gen_set_label(l2); +#endif +} #endif /* defined(TARGET_PPC64) */ /*** Lookaside buffer management ***/ @@ -4845,7 +4890,7 @@ static void gen_tlbia(DisasContext *ctx) #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else - if (unlikely(ctx->pr)) { + if (unlikely(ctx->pr || !ctx->hv)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } @@ -4913,7 +4958,7 @@ static void gen_slbia(DisasContext *ctx) #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, 
POWERPC_EXCP_PRIV_OPC); #else - if (unlikely(ctx->pr || !ctx->hv)) { + if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } @@ -9931,7 +9976,7 @@ GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC), #if defined(TARGET_PPC64) GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B), #endif -GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001FF801, PPC_MISC), +GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC), GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC), GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE), GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE), @@ -9959,6 +10004,7 @@ GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001, GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B), GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B), +GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), #endif GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), /* XXX Those instructions will need to be handled differently for @@ -9967,7 +10013,7 @@ GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC), #if defined(TARGET_PPC64) -GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x03FFFC01, PPC_SLBI), +GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI), GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI), #endif GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN), @@ -11478,8 +11524,10 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb) ctx.exception = POWERPC_EXCP_NONE; ctx.spr_cb = env->spr_cb; ctx.pr = msr_pr; - ctx.hv = !msr_pr && msr_hv; ctx.mem_idx = env->dmmu_idx; +#if !defined(CONFIG_USER_ONLY) + ctx.hv = msr_hv || !env->has_hv_mode; +#endif ctx.insns_flags = env->insns_flags; ctx.insns_flags2 = env->insns_flags2; ctx.access_type = -1; @@ -11489,6 +11537,11 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb) ctx.sf_mode = msr_is_64bit(env, env->msr); ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR); #endif + if (env->mmu_model == POWERPC_MMU_32B || + env->mmu_model == POWERPC_MMU_601 || + (env->mmu_model & POWERPC_MMU_64B)) + ctx.lazy_tlb_flush = true; + ctx.fpu_enabled = msr_fp; if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) ctx.spe_enabled = msr_spe; diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c index 8301076..a1db500 100644 --- a/target-ppc/translate_init.c +++ b/target-ppc/translate_init.c @@ -8024,6 +8024,21 @@ static void gen_spr_power8_book4(CPUPPCState *env) #endif } +static void gen_spr_power7_book4(CPUPPCState *env) +{ + /* Add a number of P7 book4 registers */ +#if !defined(CONFIG_USER_ONLY) + spr_register_kvm(env, SPR_ACOP, "ACOP", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_ACOP, 0); + spr_register_kvm(env, SPR_BOOKS_PID, "PID", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + KVM_REG_PPC_PID, 0); +#endif +} + static void init_proc_book3s_64(CPUPPCState *env, int version) { gen_spr_ne_601(env); @@ -8066,6 +8081,9 @@ static void init_proc_book3s_64(CPUPPCState *env, int version) gen_spr_power6_common(env); gen_spr_power6_dbg(env); } + if (version == BOOK3S_CPU_POWER7) { + gen_spr_power7_book4(env); + } if (version >= BOOK3S_CPU_POWER8) { 
gen_spr_power8_tce_address_control(env); gen_spr_power8_ids(env); @@ -8359,7 +8377,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_64B | PPC_64H | PPC_ALTIVEC | + PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 | @@ -8450,6 +8468,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM; pcc->msr_mask = (1ull << MSR_SF) | + (1ull << MSR_SHV) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | @@ -9854,10 +9873,7 @@ static void ppc_cpu_reset(CPUState *s) pcc->parent_reset(s); msr = (target_ulong)0; - if (0) { - /* XXX: find a suitable condition to enable the hypervisor mode */ - msr |= (target_ulong)MSR_HVB; - } + msr |= (target_ulong)MSR_HVB; msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */ msr |= (target_ulong)1 << MSR_EP; @@ -9958,6 +9974,19 @@ static void ppc_cpu_initfn(Object *obj) env->bfd_mach = pcc->bfd_mach; env->check_pow = pcc->check_pow; + /* Mark HV mode as supported if the CPU has an MSR_HV bit + * in the msr_mask. The mask can later be cleared by PAPR + * mode but the hv mode support will remain, thus enforcing + * that we cannot use priv. instructions in guest in PAPR + * mode. For 970 we currently simply don't set HV in msr_mask + * thus simulating an "Apple mode" 970. If we ever want to + * support 970 HV mode, we'll have to add a processor attribute + * of some sort. + */ +#if !defined(CONFIG_USER_ONLY) + env->has_hv_mode = !!(env->msr_mask & MSR_HVB); +#endif + #if defined(TARGET_PPC64) if (pcc->sps) { env->sps = *pcc->sps; diff --git a/trace-events b/trace-events index c50b870..44a8664 100644 --- a/trace-events +++ b/trace-events @@ -1431,6 +1431,8 @@ spapr_iommu_pci_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t i spapr_iommu_pci_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64 spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=%x" spapr_iommu_new_table(uint64_t liobn, void *table, int fd) "liobn=%"PRIx64" table=%p fd=%d" +spapr_iommu_pre_save(uint64_t liobn, uint32_t nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32 +spapr_iommu_post_load(uint64_t liobn, uint32_t pre_nb, uint32_t post_nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" => %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32 # hw/ppc/ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" |
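
Editor's sketch (not part of the patch): the macio.c and mac_dbdma.c hunks mechanically replace cpu_physical_memory_read/write with the DMA API against address_space_memory. A minimal illustration of that conversion, where MyDesc, my_fetch_descriptor and my_store_descriptor are invented names:

    /* Sketch of the cpu_physical_memory_*() -> dma_memory_*() conversion
     * used in the macio/DBDMA hunks.  The calls still go through
     * address_space_memory, which is what the old helpers did implicitly,
     * but the DMA API leaves room to later route the accesses through a
     * device-specific AddressSpace (e.g. one behind an IOMMU).
     */
    #include "qemu/osdep.h"
    #include "sysemu/dma.h"            /* dma_memory_read/write  */
    #include "exec/address-spaces.h"   /* address_space_memory   */

    typedef struct {
        uint32_t addr;
        uint32_t len;
    } MyDesc;

    static void my_fetch_descriptor(dma_addr_t desc_addr, MyDesc *desc)
    {
        /* Before: cpu_physical_memory_read(desc_addr, desc, sizeof(*desc)); */
        dma_memory_read(&address_space_memory, desc_addr, desc, sizeof(*desc));
    }

    static void my_store_descriptor(dma_addr_t desc_addr, const MyDesc *desc)
    {
        /* Before: cpu_physical_memory_write(desc_addr, desc, sizeof(*desc)); */
        dma_memory_write(&address_space_memory, desc_addr, desc, sizeof(*desc));
    }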
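
The memslot-budgeting logic added to ppc_spapr_init() is hard to follow in the flattened hunk above; reconstructed here as a standalone sketch (check_ram_slots() is an illustrative wrapper, the body is taken from the hw/ppc/spapr.c hunk):

    /* Reserve half of the KVM memslots for RAM hotplug, keep the other
     * half for PCI and other devices, and never advertise fewer than
     * SPAPR_MAX_RAM_SLOTS.
     */
    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "hw/boards.h"        /* MachineState          */
    #include "hw/ppc/spapr.h"     /* SPAPR_MAX_RAM_SLOTS   */
    #include "sysemu/kvm.h"       /* kvm_get_max_memslots() */

    static void check_ram_slots(MachineState *machine)
    {
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2
                                         : SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }

        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %" PRIu64
                         " exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }
    }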
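
A sketch of how a caller is expected to use the reworked sPAPR TCE interface after this series: spapr_tce_new_table() now only creates an empty table keyed by LIOBN, and spapr_tce_table_enable()/spapr_tce_table_disable() size it later. The call sequence mirrors the spapr_vio.c and spapr_phb_dma_reset() hunks; MY_LIOBN, MY_WIN_ADDR, MY_WIN_SIZE and the my_device_* functions are made up for illustration:

    #include "qemu/osdep.h"
    #include "hw/ppc/spapr.h"      /* sPAPRTCETable, spapr_tce_*() */

    #define MY_LIOBN     0x80000001      /* hypothetical window id      */
    #define MY_WIN_ADDR  0x0ULL          /* bus offset of the window    */
    #define MY_WIN_SIZE  0x40000000ULL   /* 1 GiB                       */

    static void my_device_dma_init(DeviceState *owner, MemoryRegion *iommu_root)
    {
        /* The table starts out empty: no bus_offset/page_shift/nb_table yet. */
        sPAPRTCETable *tcet = spapr_tce_new_table(owner, MY_LIOBN);

        /* spapr_tce_get_iommu() now returns the "root" container region,
         * so it can be mapped once and left in place across re-sizing.   */
        memory_region_add_subregion_overlap(iommu_root, 0,
                                            spapr_tce_get_iommu(tcet), 0);

        /* Populate the window: this allocates the table and maps the IOMMU
         * region inside the root container at bus_offset.                 */
        spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, MY_WIN_ADDR,
                               MY_WIN_SIZE >> SPAPR_TCE_PAGE_SHIFT);
    }

    static void my_device_dma_reset(sPAPRTCETable *tcet)
    {
        /* Tear the window down before re-creating it with new geometry. */
        spapr_tce_table_disable(tcet);
        spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, MY_WIN_ADDR,
                               MY_WIN_SIZE >> SPAPR_TCE_PAGE_SHIFT);
    }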
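
The new VMSTATE_VARRAY_UINT32_ALLOC macro in include/migration/vmstate.h is used by spapr_iommu through a pre_save snapshot (mig_nb_table/mig_table) plus a post_load copy. A hedged sketch of that pattern for a generic device; MyDev and its field names are illustrative only:

    /* Migrating a heap-allocated array whose length can change across
     * migration, using VMSTATE_VARRAY_UINT32_ALLOC.
     */
    #include "qemu/osdep.h"
    #include "migration/vmstate.h"

    typedef struct MyDev {
        uint32_t  nb_entries;      /* live size                      */
        uint64_t *entries;         /* live table                     */
        uint32_t  mig_nb_entries;  /* snapshot taken in pre_save     */
        uint64_t *mig_entries;     /* allocated on the destination   */
    } MyDev;

    static const VMStateDescription vmstate_my_dev = {
        .name = "my-dev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(mig_nb_entries, MyDev),
            /* VMS_ALLOC makes the loader allocate mig_entries itself, so a
             * post_load hook can resize the live table, copy the data in
             * and release the temporary buffer, as
             * spapr_tce_table_post_load() does above.                    */
            VMSTATE_VARRAY_UINT32_ALLOC(mig_entries, MyDev, mig_nb_entries, 0,
                                        vmstate_info_uint64, uint64_t),
            VMSTATE_END_OF_LIST()
        },
    };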
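
Finally, the translate.c and mmu_helper.c hunks switch tlbie on hash MMUs to a deferred ("lazy") flush: the helper only sets env->tlb_need_flush, and the real flush happens at the next context-synchronising point (ptesync on 64-bit, any sync on 32-bit) via gen_check_tlb_flush(). A sketch of the runtime side of that check, mirroring check_tlb_flush() in target-ppc/helper_regs.h (whose body is not shown in the diff, so treat this as an approximation):

    static inline void my_check_tlb_flush(CPUPPCState *env)
    {
        CPUState *cs = CPU(ppc_env_get_cpu(env));

        /* tlbie only marked the TLB dirty; flush it now, once. */
        if (env->tlb_need_flush) {
            env->tlb_need_flush = 0;
            tlb_flush(cs, 1);
        }
    }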