-rw-r--r--  accel/kvm/kvm-all.c                |  18
-rw-r--r--  accel/stubs/kvm-stub.c             |  12
-rw-r--r--  block/qcow2-bitmap.c               |  21
-rw-r--r--  hw/arm/xlnx-versal.c               |   2
-rw-r--r--  hw/misc/mos6522.c                  |   8
-rw-r--r--  hw/net/virtio-net.c                |  58
-rw-r--r--  hw/ppc/spapr_events.c              |   1
-rw-r--r--  hw/ppc/spapr_irq.c                 |  17
-rw-r--r--  hw/riscv/boot.c                    |   7
-rw-r--r--  hw/riscv/sifive_e.c                |   2
-rw-r--r--  hw/riscv/sifive_u.c                |   3
-rw-r--r--  hw/riscv/spike.c                   |   6
-rw-r--r--  hw/riscv/virt.c                    |   8
-rw-r--r--  hw/vfio/pci.c                      |  64
-rw-r--r--  hw/vfio/pci.h                      |   1
-rw-r--r--  include/hw/arm/xlnx-versal.h       |   3
-rw-r--r--  include/hw/riscv/boot.h            |   3
-rw-r--r--  include/sysemu/kvm.h               |   5
-rw-r--r--  linux-user/syscall.c               |  12
-rw-r--r--  migration/savevm.c                 |   3
-rwxr-xr-x  scripts/vmstate-static-checker.py  |   6
-rw-r--r--  target/arm/helper.c                |  83
-rw-r--r--  target/arm/m_helper.c              |   7
-rw-r--r--  target/i386/cpu.c                  |   6
-rw-r--r--  target/i386/hvf/hvf.c              |  61
-rw-r--r--  target/i386/hvf/vmx.h              |  18
-rw-r--r--  target/i386/hvf/x86_decode.c       |  64
-rw-r--r--  target/i386/hvf/x86_decode.h       |  20
-rw-r--r--  target/i386/hvf/x86_emu.c          |   3
-rw-r--r--  target/i386/hvf/x86hvf.c           |   4
-rw-r--r--  tests/migration-test.c             |   4
-rw-r--r--  tests/pxe-test.c                   |   6
-rwxr-xr-x  tests/qemu-iotests/169             |  22
-rw-r--r--  tests/qemu-iotests/169.out         |   4
34 files changed, 403 insertions, 159 deletions
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 140b0bd..ca00daa 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -149,6 +149,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_LAST_INFO
};
+static NotifierList kvm_irqchip_change_notifiers =
+ NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
+
#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
@@ -1396,6 +1399,21 @@ void kvm_irqchip_release_virq(KVMState *s, int virq)
trace_kvm_irqchip_release_virq(virq);
}
+void kvm_irqchip_add_change_notifier(Notifier *n)
+{
+ notifier_list_add(&kvm_irqchip_change_notifiers, n);
+}
+
+void kvm_irqchip_remove_change_notifier(Notifier *n)
+{
+ notifier_remove(n);
+}
+
+void kvm_irqchip_change_notify(void)
+{
+ notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
+}
+
static unsigned int kvm_hash_msi(uint32_t data)
{
/* This is optimized for IA32 MSI layout. However, no other arch shall
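
A minimal sketch, not part of the patch, of how a device model is expected to
consume the notifier API added above. MyDevice and its realize/unrealize
helpers are hypothetical; Notifier, container_of() and the
kvm_irqchip_*_change_notifier() calls are the ones introduced in this series.

    #include "qemu/osdep.h"
    #include "qemu/notify.h"
    #include "sysemu/kvm.h"

    typedef struct MyDevice {
        Notifier irqchip_change_notifier;
    } MyDevice;

    /* Invoked through notifier_list_notify() whenever the in-kernel
     * irqchip changes, e.g. when spapr switches between XICS and XIVE. */
    static void my_device_irqchip_change(Notifier *notify, void *data)
    {
        MyDevice *dev = container_of(notify, MyDevice, irqchip_change_notifier);

        /* re-resolve interrupt routes against the new irqchip here */
        (void)dev;
    }

    static void my_device_realize(MyDevice *dev)
    {
        dev->irqchip_change_notifier.notify = my_device_irqchip_change;
        kvm_irqchip_add_change_notifier(&dev->irqchip_change_notifier);
    }

    static void my_device_unrealize(MyDevice *dev)
    {
        kvm_irqchip_remove_change_notifier(&dev->irqchip_change_notifier);
    }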
diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index 6feb66e..82f118d 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -138,6 +138,18 @@ void kvm_irqchip_commit_routes(KVMState *s)
{
}
+void kvm_irqchip_add_change_notifier(Notifier *n)
+{
+}
+
+void kvm_irqchip_remove_change_notifier(Notifier *n)
+{
+}
+
+void kvm_irqchip_change_notify(void)
+{
+}
+
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
return -ENOSYS;
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
index 809bbc5..8abaf63 100644
--- a/block/qcow2-bitmap.c
+++ b/block/qcow2-bitmap.c
@@ -988,7 +988,26 @@ bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, Error **errp)
}
QSIMPLEQ_FOREACH(bm, bm_list, entry) {
- BdrvDirtyBitmap *bitmap = load_bitmap(bs, bm, errp);
+ BdrvDirtyBitmap *bitmap;
+
+ if ((bm->flags & BME_FLAG_IN_USE) &&
+ bdrv_find_dirty_bitmap(bs, bm->name))
+ {
+ /*
+ * We already have corresponding BdrvDirtyBitmap, and bitmap in the
+ * image is marked IN_USE. Firstly, this state is valid, no reason
+ * to consider existing BdrvDirtyBitmap to be bad. Secondly it's
+ * absolutely possible, when we do migration with shared storage
+ * with dirty-bitmaps capability enabled: if the bitmap was loaded
+ * from this storage before migration start, the storage will
+ * of course contain an outdated IN_USE version of the bitmap, and we
+ * should not load it on migration target, as we already have this
+ * bitmap, being migrated.
+ */
+ continue;
+ }
+
+ bitmap = load_bitmap(bs, bm, errp);
if (bitmap == NULL) {
goto fail;
}
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index 98163eb..8b3d8d8 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -257,6 +257,8 @@ static void versal_unimp(Versal *s)
MM_CRL, MM_CRL_SIZE);
versal_unimp_area(s, "crf", &s->mr_ps,
MM_FPD_CRF, MM_FPD_CRF_SIZE);
+ versal_unimp_area(s, "crp", &s->mr_ps,
+ MM_PMC_CRP, MM_PMC_CRP_SIZE);
versal_unimp_area(s, "iou-scntr", &s->mr_ps,
MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE);
versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps,
diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c
index aa3bfe1..cecf0be 100644
--- a/hw/misc/mos6522.c
+++ b/hw/misc/mos6522.c
@@ -113,6 +113,10 @@ static int64_t get_next_irq_time(MOS6522State *s, MOS6522Timer *ti,
int64_t d, next_time;
unsigned int counter;
+ if (ti->frequency == 0) {
+ return INT64_MAX;
+ }
+
/* current counter value */
d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ti->load_time,
ti->frequency, NANOSECONDS_PER_SECOND);
@@ -149,10 +153,10 @@ static void mos6522_timer1_update(MOS6522State *s, MOS6522Timer *ti,
if (!ti->timer) {
return;
}
+ ti->next_irq_time = get_next_irq_time(s, ti, current_time);
if ((s->ier & T1_INT) == 0 || (s->acr & T1MODE) != T1MODE_CONT) {
timer_del(ti->timer);
} else {
- ti->next_irq_time = get_next_irq_time(s, ti, current_time);
timer_mod(ti->timer, ti->next_irq_time);
}
}
@@ -163,10 +167,10 @@ static void mos6522_timer2_update(MOS6522State *s, MOS6522Timer *ti,
if (!ti->timer) {
return;
}
+ ti->next_irq_time = get_next_irq_time(s, ti, current_time);
if ((s->ier & T2_INT) == 0) {
timer_del(ti->timer);
} else {
- ti->next_irq_time = get_next_irq_time(s, ti, current_time);
timer_mod(ti->timer, ti->next_irq_time);
}
}
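
The frequency == 0 guard above makes get_next_irq_time() return INT64_MAX,
i.e. "never fires", for a timer whose clock has not been programmed. A hedged
note on the failure mode this avoids, assuming the rest of the function keeps
converting ticks back to nanoseconds by dividing by ti->frequency as current
QEMU does:

    /*
     * Without the guard, the later
     *     muldiv64(next_time, NANOSECONDS_PER_SECOND, ti->frequency)
     * would divide by zero for an unconfigured timer.  Returning INT64_MAX
     * instead schedules the next interrupt infinitely far away, which
     * timer_mod() handles fine.
     */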
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 97a5113..3c31471 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -759,6 +759,10 @@ static void failover_add_primary(VirtIONet *n, Error **errp)
{
Error *err = NULL;
+ if (n->primary_dev) {
+ return;
+ }
+
n->primary_device_opts = qemu_opts_find(qemu_find_opts("device"),
n->primary_device_id);
if (n->primary_device_opts) {
@@ -2801,25 +2805,33 @@ static bool failover_replug_primary(VirtIONet *n, Error **errp)
n->primary_device_opts = qemu_opts_from_qdict(
qemu_find_opts("device"),
n->primary_device_dict, errp);
- }
- if (n->primary_device_opts) {
- if (n->primary_dev) {
- n->primary_bus = n->primary_dev->parent_bus;
- }
- qdev_set_parent_bus(n->primary_dev, n->primary_bus);
- n->primary_should_be_hidden = false;
- qemu_opt_set_bool(n->primary_device_opts,
- "partially_hotplugged", true, errp);
- hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev);
- if (hotplug_ctrl) {
- hotplug_handler_pre_plug(hotplug_ctrl, n->primary_dev, errp);
- hotplug_handler_plug(hotplug_ctrl, n->primary_dev, errp);
+ if (!n->primary_device_opts) {
+ error_setg(errp, "virtio_net: couldn't find primary device opts");
+ goto out;
}
- if (!n->primary_dev) {
+ }
+ if (!n->primary_dev) {
error_setg(errp, "virtio_net: couldn't find primary device");
- }
+ goto out;
}
- return *errp != NULL;
+
+ n->primary_bus = n->primary_dev->parent_bus;
+ if (!n->primary_bus) {
+ error_setg(errp, "virtio_net: couldn't find primary bus");
+ goto out;
+ }
+ qdev_set_parent_bus(n->primary_dev, n->primary_bus);
+ n->primary_should_be_hidden = false;
+ qemu_opt_set_bool(n->primary_device_opts,
+ "partially_hotplugged", true, errp);
+ hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev);
+ if (hotplug_ctrl) {
+ hotplug_handler_pre_plug(hotplug_ctrl, n->primary_dev, errp);
+ hotplug_handler_plug(hotplug_ctrl, n->primary_dev, errp);
+ }
+
+out:
+ return *errp == NULL;
}
static void virtio_net_handle_migration_primary(VirtIONet *n,
@@ -2848,7 +2860,7 @@ static void virtio_net_handle_migration_primary(VirtIONet *n,
warn_report("couldn't unplug primary device");
}
} else if (migration_has_failed(s)) {
- /* We already unplugged the device let's plugged it back */
+ /* We already unplugged the device let's plug it back */
if (!failover_replug_primary(n, &err)) {
if (err) {
error_report_err(err);
@@ -2868,9 +2880,12 @@ static int virtio_net_primary_should_be_hidden(DeviceListener *listener,
QemuOpts *device_opts)
{
VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
- bool match_found;
- bool hide;
+ bool match_found = false;
+ bool hide = false;
+ if (!device_opts) {
+ return -1;
+ }
n->primary_device_dict = qemu_opts_to_qdict(device_opts,
n->primary_device_dict);
if (n->primary_device_dict) {
@@ -2878,7 +2893,7 @@ static int virtio_net_primary_should_be_hidden(DeviceListener *listener,
n->standby_id = g_strdup(qdict_get_try_str(n->primary_device_dict,
"failover_pair_id"));
}
- if (device_opts && g_strcmp0(n->standby_id, n->netclient_name) == 0) {
+ if (g_strcmp0(n->standby_id, n->netclient_name) == 0) {
match_found = true;
} else {
match_found = false;
@@ -3124,6 +3139,9 @@ static bool primary_unplug_pending(void *opaque)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIONet *n = VIRTIO_NET(vdev);
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
+ return false;
+ }
return n->primary_dev ? n->primary_dev->pending_deleted_event : false;
}
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 0e4c195..e355e00 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -358,6 +358,7 @@ static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr,
rtas_event_log_to_source(spapr,
spapr_event_log_entry_type(entry));
+ g_assert(source);
if (source->mask & event_mask) {
break;
}
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index 168044b..d6bb7fd 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -373,6 +373,14 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr,
smc->nr_xirqs + SPAPR_XIRQ_BASE);
+
+ /*
+ * Mostly we don't actually need this until reset, except that not
+ * having this set up can cause VFIO devices to issue a
+ * false-positive warning during realize(), because they don't yet
+ * have an in-kernel irq chip.
+ */
+ spapr_irq_update_active_intc(spapr);
}
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
@@ -508,6 +516,12 @@ static void set_active_intc(SpaprMachineState *spapr,
}
spapr->active_intc = new_intc;
+
+ /*
+ * We've changed the kernel irqchip, let VFIO devices know they
+ * need to readjust.
+ */
+ kvm_irqchip_change_notify();
}
void spapr_irq_update_active_intc(SpaprMachineState *spapr)
@@ -522,7 +536,8 @@ void spapr_irq_update_active_intc(SpaprMachineState *spapr)
* this.
*/
new_intc = SPAPR_INTC(spapr->xive);
- } else if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+ } else if (spapr->ov5_cas
+ && spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
new_intc = SPAPR_INTC(spapr->xive);
} else {
new_intc = SPAPR_INTC(spapr->ics);
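
Sketch of the resulting call chain, pieced together from the hunks in this
series rather than taken from any one file:

    /*
     * spapr CAS negotiation or machine reset
     *   -> spapr_irq_update_active_intc()
     *      -> set_active_intc()
     *         -> kvm_irqchip_change_notify()        (accel/kvm/kvm-all.c)
     *            -> vfio_irqchip_change()           (hw/vfio/pci.c)
     *               -> vfio_intx_update(vdev, &vdev->intx.route)
     */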
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index 7fee98d..027303d 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -114,12 +114,13 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
exit(1);
}
-target_ulong riscv_load_kernel(const char *kernel_filename)
+target_ulong riscv_load_kernel(const char *kernel_filename, symbol_fn_t sym_cb)
{
uint64_t kernel_entry, kernel_high;
- if (load_elf(kernel_filename, NULL, NULL, NULL,
- &kernel_entry, NULL, &kernel_high, 0, EM_RISCV, 1, 0) > 0) {
+ if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
+ &kernel_entry, NULL, &kernel_high, 0,
+ EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
return kernel_entry;
}
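
A sketch of the kind of callback a board can now pass as sym_cb, assuming
symbol_fn_t keeps its signature from "hw/loader.h"; the symbol name and the
my_* identifiers are purely illustrative:

    #include "qemu/osdep.h"
    #include "hw/loader.h"
    #include "hw/riscv/boot.h"

    static uint64_t my_tohost_addr;

    static void my_symbol_callback(const char *st_name, int st_info,
                                   uint64_t st_value, uint64_t st_size)
    {
        /* called once per ELF symbol while the kernel is loaded */
        if (strcmp("tohost", st_name) == 0) {
            my_tohost_addr = st_value;
        }
    }

    /* in a board init:
     *     riscv_load_kernel(machine->kernel_filename, my_symbol_callback);
     */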
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index 0f9d641..8a6b034 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -111,7 +111,7 @@ static void riscv_sifive_e_init(MachineState *machine)
memmap[SIFIVE_E_MROM].base, &address_space_memory);
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename);
+ riscv_load_kernel(machine->kernel_filename, NULL);
}
}
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index 9552abf..0140e95 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -344,7 +344,8 @@ static void riscv_sifive_u_init(MachineState *machine)
memmap[SIFIVE_U_DRAM].base);
if (machine->kernel_filename) {
- uint64_t kernel_entry = riscv_load_kernel(machine->kernel_filename);
+ uint64_t kernel_entry = riscv_load_kernel(machine->kernel_filename,
+ NULL);
if (machine->initrd_filename) {
hwaddr start;
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index 8bbffbc..8823681 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -184,7 +184,7 @@ static void spike_board_init(MachineState *machine)
mask_rom);
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename);
+ riscv_load_kernel(machine->kernel_filename, htif_symbol_callback);
}
/* reset vector */
@@ -273,7 +273,7 @@ static void spike_v1_10_0_board_init(MachineState *machine)
mask_rom);
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename);
+ riscv_load_kernel(machine->kernel_filename, htif_symbol_callback);
}
/* reset vector */
@@ -359,7 +359,7 @@ static void spike_v1_09_1_board_init(MachineState *machine)
mask_rom);
if (machine->kernel_filename) {
- riscv_load_kernel(machine->kernel_filename);
+ riscv_load_kernel(machine->kernel_filename, htif_symbol_callback);
}
/* reset vector */
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 23f340d..c44b865 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -359,7 +359,10 @@ static void create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap,
nodename = g_strdup_printf("/test@%lx",
(long)memmap[VIRT_TEST].base);
qemu_fdt_add_subnode(fdt, nodename);
- qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,test0");
+ {
+ const char compat[] = "sifive,test1\0sifive,test0";
+ qemu_fdt_setprop(fdt, nodename, "compatible", compat, sizeof(compat));
+ }
qemu_fdt_setprop_cells(fdt, nodename, "reg",
0x0, memmap[VIRT_TEST].base,
0x0, memmap[VIRT_TEST].size);
@@ -476,7 +479,8 @@ static void riscv_virt_board_init(MachineState *machine)
memmap[VIRT_DRAM].base);
if (machine->kernel_filename) {
- uint64_t kernel_entry = riscv_load_kernel(machine->kernel_filename);
+ uint64_t kernel_entry = riscv_load_kernel(machine->kernel_filename,
+ NULL);
if (machine->initrd_filename) {
hwaddr start;
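
A note on the two-entry compatible property set in the hunk above (editor's
sketch, not from the patch):

    /*
     * In DTS syntax the property reads:
     *     compatible = "sifive,test1", "sifive,test0";
     * Device-tree string lists are NUL-separated, so the embedded '\0' in
     * the compat[] array is the separator, and sizeof(compat) also covers
     * the terminating NUL of the last string, exactly what
     * qemu_fdt_setprop() expects for the raw property bytes.
     */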
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 0c55883..2d40b39 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -216,30 +216,18 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
#endif
}
-static void vfio_intx_update(PCIDevice *pdev)
+static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
- VFIOPCIDevice *vdev = PCI_VFIO(pdev);
- PCIINTxRoute route;
Error *err = NULL;
- if (vdev->interrupt != VFIO_INT_INTx) {
- return;
- }
-
- route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
-
- if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
- return; /* Nothing changed */
- }
-
trace_vfio_intx_update(vdev->vbasedev.name,
- vdev->intx.route.irq, route.irq);
+ vdev->intx.route.irq, route->irq);
vfio_intx_disable_kvm(vdev);
- vdev->intx.route = route;
+ vdev->intx.route = *route;
- if (route.mode != PCI_INTX_ENABLED) {
+ if (route->mode != PCI_INTX_ENABLED) {
return;
}
@@ -252,6 +240,30 @@ static void vfio_intx_update(PCIDevice *pdev)
vfio_intx_eoi(&vdev->vbasedev);
}
+static void vfio_intx_routing_notifier(PCIDevice *pdev)
+{
+ VFIOPCIDevice *vdev = PCI_VFIO(pdev);
+ PCIINTxRoute route;
+
+ if (vdev->interrupt != VFIO_INT_INTx) {
+ return;
+ }
+
+ route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
+
+ if (pci_intx_route_changed(&vdev->intx.route, &route)) {
+ vfio_intx_update(vdev, &route);
+ }
+}
+
+static void vfio_irqchip_change(Notifier *notify, void *data)
+{
+ VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
+ irqchip_change_notifier);
+
+ vfio_intx_update(vdev, &vdev->intx.route);
+}
+
static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
@@ -2967,31 +2979,34 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
vfio_intx_mmap_enable, vdev);
- pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
+ pci_device_set_intx_routing_notifier(&vdev->pdev,
+ vfio_intx_routing_notifier);
+ vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
+ kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
ret = vfio_intx_enable(vdev, errp);
if (ret) {
- goto out_teardown;
+ goto out_deregister;
}
}
if (vdev->display != ON_OFF_AUTO_OFF) {
ret = vfio_display_probe(vdev, errp);
if (ret) {
- goto out_teardown;
+ goto out_deregister;
}
}
if (vdev->enable_ramfb && vdev->dpy == NULL) {
error_setg(errp, "ramfb=on requires display=on");
- goto out_teardown;
+ goto out_deregister;
}
if (vdev->display_xres || vdev->display_yres) {
if (vdev->dpy == NULL) {
error_setg(errp, "xres and yres properties require display=on");
- goto out_teardown;
+ goto out_deregister;
}
if (vdev->dpy->edid_regs == NULL) {
error_setg(errp, "xres and yres properties need edid support");
- goto out_teardown;
+ goto out_deregister;
}
}
@@ -3015,8 +3030,10 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
return;
-out_teardown:
+out_deregister:
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
+out_teardown:
vfio_teardown_msi(vdev);
vfio_bars_exit(vdev);
error:
@@ -3059,6 +3076,7 @@ static void vfio_exitfn(PCIDevice *pdev)
vfio_unregister_req_notifier(vdev);
vfio_unregister_err_notifier(vdev);
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
vfio_disable_interrupts(vdev);
if (vdev->intx.mmap_timer) {
timer_free(vdev->intx.mmap_timer);
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index b329d50..35626cd 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -169,6 +169,7 @@ typedef struct VFIOPCIDevice {
bool enable_ramfb;
VFIODisplay *dpy;
Error *migration_blocker;
+ Notifier irqchip_change_notifier;
} VFIOPCIDevice;
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index 14405c1..d844c4f 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -119,4 +119,7 @@ typedef struct Versal {
#define MM_IOU_SCNTRS_SIZE 0x10000
#define MM_FPD_CRF 0xfd1a0000U
#define MM_FPD_CRF_SIZE 0x140000
+
+#define MM_PMC_CRP 0xf1260000U
+#define MM_PMC_CRP_SIZE 0x10000
#endif
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index 66075d0..df80051 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -28,7 +28,8 @@ void riscv_find_and_load_firmware(MachineState *machine,
char *riscv_find_firmware(const char *firmware_filename);
target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr);
-target_ulong riscv_load_kernel(const char *kernel_filename);
+target_ulong riscv_load_kernel(const char *kernel_filename,
+ symbol_fn_t sym_cb);
hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
uint64_t kernel_entry, hwaddr *start);
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 9d14328..9fe233b 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -201,6 +201,7 @@ typedef struct KVMCapabilityInfo {
struct KVMState;
typedef struct KVMState KVMState;
extern KVMState *kvm_state;
+typedef struct Notifier Notifier;
/* external API */
@@ -401,6 +402,10 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
+void kvm_irqchip_add_change_notifier(Notifier *n);
+void kvm_irqchip_remove_change_notifier(Notifier *n);
+void kvm_irqchip_change_notify(void);
+
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
struct kvm_guest_debug;
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index ce399a5..171c0ca 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6743,12 +6743,12 @@ static inline abi_long host_to_target_statx(struct target_statx *host_stx,
__put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
__put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
__put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
- __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_atime.tv_sec);
- __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_atime.tv_nsec);
- __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_atime.tv_sec);
- __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_atime.tv_nsec);
- __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_atime.tv_sec);
- __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_atime.tv_nsec);
+ __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
+ __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
+ __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
+ __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
+ __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
+ __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
__put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
__put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
__put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
diff --git a/migration/savevm.c b/migration/savevm.c
index 966a9c3..a71b930 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1119,7 +1119,8 @@ int qemu_savevm_nr_failover_devices(void)
int n = 0;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (se->vmsd && se->vmsd->dev_unplug_pending) {
+ if (se->vmsd && se->vmsd->dev_unplug_pending &&
+ se->vmsd->dev_unplug_pending(se->opaque)) {
n++;
}
}
diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py
index d346728..f8b7b8f 100755
--- a/scripts/vmstate-static-checker.py
+++ b/scripts/vmstate-static-checker.py
@@ -375,9 +375,11 @@ def main():
help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter."
parser = argparse.ArgumentParser(description=help_text)
- parser.add_argument('-s', '--src', type=file, required=True,
+ parser.add_argument('-s', '--src', type=argparse.FileType('r'),
+ required=True,
help='json dump from src qemu')
- parser.add_argument('-d', '--dest', type=file, required=True,
+ parser.add_argument('-d', '--dest', type=argparse.FileType('r'),
+ required=True,
help='json dump from dest qemu')
parser.add_argument('--reverse', required=False, default=False,
action='store_true',
diff --git a/target/arm/helper.c b/target/arm/helper.c
index a089fb5..0bf8f53 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1934,8 +1934,11 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
CPUState *cs = env_cpu(env);
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
uint64_t ret = 0;
+ bool allow_virt = (arm_current_el(env) == 1 &&
+ (!arm_is_secure_below_el3(env) ||
+ (env->cp15.scr_el3 & SCR_EEL2)));
- if (hcr_el2 & HCR_IMO) {
+ if (allow_virt && (hcr_el2 & HCR_IMO)) {
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= CPSR_I;
}
@@ -1945,7 +1948,7 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
- if (hcr_el2 & HCR_FMO) {
+ if (allow_virt && (hcr_el2 & HCR_FMO)) {
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= CPSR_F;
}
@@ -5975,6 +5978,26 @@ static const ARMCPRegInfo predinv_reginfo[] = {
REGINFO_SENTINEL
};
+static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ return access_aa64_tid3(env, ri, isread);
+ }
+
+ return CP_ACCESS_OK;
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -5998,6 +6021,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_pfr0 },
/* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
* the value of the GIC field until after we define these regs.
@@ -6005,63 +6029,78 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_NO_RAW,
+ .accessfn = access_aa32_tid3,
.readfn = id_pfr1_read,
.writefn = arm_cp_write_ignore },
{ .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_dfr0 },
{ .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_afr0 },
{ .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_mmfr0 },
{ .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_mmfr1 },
{ .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_mmfr2 },
{ .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_mmfr3 },
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar0 },
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar1 },
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar2 },
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar3 },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar4 },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar5 },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->id_mmfr4 },
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
.resetvalue = cpu->isar.id_isar6 },
REGINFO_SENTINEL
};
@@ -6182,164 +6221,204 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_NO_RAW,
+ .accessfn = access_aa64_tid3,
.readfn = id_aa64pfr0_read,
.writefn = arm_cp_write_ignore },
{ .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64pfr1},
{ .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
/* At present, only SVEver == 0 is defined anyway. */
.resetvalue = 0 },
{ .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->id_aa64dfr0 },
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->id_aa64dfr1 },
{ .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->id_aa64afr0 },
{ .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->id_aa64afr1 },
{ .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64isar0 },
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64isar1 },
{ .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64mmfr0 },
{ .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.id_aa64mmfr1 },
{ .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.mvfr0 },
{ .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.mvfr1 },
{ .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = cpu->isar.mvfr2 },
{ .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
.resetvalue = 0 },
{ .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 4a48b79..76de317 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2233,19 +2233,18 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
if (env->v7m.secure) {
lr |= R_V7M_EXCRET_S_MASK;
}
- if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
- lr |= R_V7M_EXCRET_FTYPE_MASK;
- }
} else {
lr = R_V7M_EXCRET_RES1_MASK |
R_V7M_EXCRET_S_MASK |
R_V7M_EXCRET_DCRS_MASK |
- R_V7M_EXCRET_FTYPE_MASK |
R_V7M_EXCRET_ES_MASK;
if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
lr |= R_V7M_EXCRET_SPSEL_MASK;
}
}
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
+ }
if (!arm_v7m_is_handler_mode(env)) {
lr |= R_V7M_EXCRET_MODE_MASK;
}
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 730fb28..69f518a 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3006,7 +3006,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
- VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
.xlevel = 0x80000008,
.model_id = "Intel Xeon Processor (Skylake)",
.versions = (X86CPUVersionDefinition[]) {
@@ -3131,7 +3132,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
- VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
.xlevel = 0x80000008,
.model_id = "Intel Xeon Processor (Cascadelake)",
.versions = (X86CPUVersionDefinition[]) {
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 231732a..784e67d 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -107,14 +107,14 @@ static void assert_hvf_ok(hv_return_t ret)
}
/* Memory slots */
-hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)
+hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
hvf_slot *slot;
int x;
for (x = 0; x < hvf_state->num_slots; ++x) {
slot = &hvf_state->slots[x];
if (slot->size && start < (slot->start + slot->size) &&
- end > slot->start) {
+ (start + size) > slot->start) {
return slot;
}
}
@@ -129,12 +129,10 @@ struct mac_slot {
};
struct mac_slot mac_slots[32];
-#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))
-static int do_hvf_set_memory(hvf_slot *slot)
+static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
struct mac_slot *macslot;
- hv_memory_flags_t flags;
hv_return_t ret;
macslot = &mac_slots[slot->slot_id];
@@ -151,8 +149,6 @@ static int do_hvf_set_memory(hvf_slot *slot)
return 0;
}
- flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
-
macslot->present = 1;
macslot->gpa_start = slot->start;
macslot->size = slot->size;
@@ -165,14 +161,24 @@ void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
hvf_slot *mem;
MemoryRegion *area = section->mr;
+ bool writeable = !area->readonly && !area->rom_device;
+ hv_memory_flags_t flags;
if (!memory_region_is_ram(area)) {
- return;
+ if (writeable) {
+ return;
+ } else if (!memory_region_is_romd(area)) {
+ /*
+ * If the memory device is not in romd_mode, then we actually want
+ * to remove the hvf memory slot so all accesses will trap.
+ */
+ add = false;
+ }
}
mem = hvf_find_overlap_slot(
section->offset_within_address_space,
- section->offset_within_address_space + int128_get64(section->size));
+ int128_get64(section->size));
if (mem && add) {
if (mem->size == int128_get64(section->size) &&
@@ -186,7 +192,7 @@ void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
/* Region needs to be reset. set the size to 0 and remap it. */
if (mem) {
mem->size = 0;
- if (do_hvf_set_memory(mem)) {
+ if (do_hvf_set_memory(mem, 0)) {
error_report("Failed to reset overlapping slot");
abort();
}
@@ -196,6 +202,13 @@ void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
return;
}
+ if (area->readonly ||
+ (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
+ flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
+ } else {
+ flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
+ }
+
/* Now make a new slot. */
int x;
@@ -216,7 +229,7 @@ void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
mem->start = section->offset_within_address_space;
mem->region = area;
- if (do_hvf_set_memory(mem)) {
+ if (do_hvf_set_memory(mem, flags)) {
error_report("Error registering new memory slot");
abort();
}
@@ -345,7 +358,14 @@ static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
return false;
}
- return !slot;
+ if (!slot) {
+ return true;
+ }
+ if (!memory_region_is_ram(slot->region) &&
+ !(read && memory_region_is_romd(slot->region))) {
+ return true;
+ }
+ return false;
}
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
@@ -354,7 +374,7 @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
slot = hvf_find_overlap_slot(
section->offset_within_address_space,
- section->offset_within_address_space + int128_get64(section->size));
+ int128_get64(section->size));
/* protect region against writes; begin tracking it */
if (on) {
@@ -421,12 +441,20 @@ static MemoryListener hvf_memory_listener = {
};
void hvf_reset_vcpu(CPUState *cpu) {
+ uint64_t pdpte[4] = {0, 0, 0, 0};
+ int i;
/* TODO: this shouldn't be needed; there is already a call to
* cpu_synchronize_all_post_reset in vl.c
*/
wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
+
+ /* Initialize PDPTE */
+ for (i = 0; i < 4; i++) {
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+ }
+
macvm_set_cr0(cpu->hvf_fd, 0x60000010);
wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
@@ -498,7 +526,6 @@ void hvf_reset_vcpu(CPUState *cpu) {
wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
}
- hv_vm_sync_tsc(0);
hv_vcpu_invalidate_tlb(cpu->hvf_fd);
hv_vcpu_flush(cpu->hvf_fd);
}
@@ -592,7 +619,7 @@ int hvf_init_vcpu(CPUState *cpu)
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
- /*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
@@ -720,7 +747,7 @@ int hvf_vcpu_exec(CPUState *cpu)
ret = EXCP_INTERRUPT;
break;
}
- /* Need to check if MMIO or unmmaped fault */
+ /* Need to check if MMIO or unmapped fault */
case EXIT_REASON_EPT_FAULT:
{
hvf_slot *slot;
@@ -731,7 +758,7 @@ int hvf_vcpu_exec(CPUState *cpu)
vmx_set_nmi_blocking(cpu);
}
- slot = hvf_find_overlap_slot(gpa, gpa);
+ slot = hvf_find_overlap_slot(gpa, 1);
/* mmio */
if (ept_emulation_fault(slot, gpa, exit_qual)) {
struct x86_decode decode;
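
Why the switch from a (start, end) pair to (start, size) matters at the slot
boundary; a self-contained check mirroring the test in
hvf_find_overlap_slot() (editor's sketch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool slot_overlaps(uint64_t start, uint64_t size,
                              uint64_t slot_start, uint64_t slot_size)
    {
        return start < slot_start + slot_size && start + size > slot_start;
    }

    /*
     * slot_overlaps(0x1000, 1, 0x1000, 0x1000) is true: an EPT fault at the
     * first byte of a slot finds the slot.  The old call sites that passed
     * (gpa, gpa) described an empty interval, so "end > slot->start" was
     * false for that same address.
     */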
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
index 5dc52ec..eb8894c 100644
--- a/target/i386/hvf/vmx.h
+++ b/target/i386/hvf/vmx.h
@@ -121,6 +121,7 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
uint64_t pdpte[4] = {0, 0, 0, 0};
uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
+ uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
!(efer & MSR_EFER_LME)) {
@@ -128,18 +129,15 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
MEMTXATTRS_UNSPECIFIED,
(uint8_t *)pdpte, 32, 0);
+ /* Only set PDPTE when appropriate. */
+ for (i = 0; i < 4; i++) {
+ wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+ }
}
- for (i = 0; i < 4; i++) {
- wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
- }
-
- wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
+ wvmcs(vcpu, VMCS_CR0_MASK, mask);
wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
- cr0 &= ~CR0_CD;
- wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
-
if (efer & MSR_EFER_LME) {
if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
enter_long_mode(vcpu, cr0, efer);
@@ -149,6 +147,10 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
}
}
+ /* Filter new CR0 after we are finished examining it above. */
+ cr0 = (cr0 & ~(mask & ~CR0_PG));
+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+
hv_vcpu_invalidate_tlb(vcpu);
hv_vcpu_flush(vcpu);
}
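
Working through the final CR0 filter above, using only values visible in the
hunk:

    /*
     * mask            = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET
     * mask & ~CR0_PG  =          CR0_CD | CR0_NW | CR0_NE | CR0_ET
     * cr0 &= ~(mask & ~CR0_PG)  -> guest-requested CD/NW/NE/ET are dropped
     *                              while PG is preserved
     * cr0 |= CR0_NE | CR0_ET    -> NE and ET are forced on in the real
     *                              guest CR0; the guest's own view comes
     *                              from VMCS_CR0_SHADOW written earlier.
     */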
diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
index 822fa18..77c3466 100644
--- a/target/i386/hvf/x86_decode.c
+++ b/target/i386/hvf/x86_decode.c
@@ -122,7 +122,8 @@ static void decode_rax(CPUX86State *env, struct x86_decode *decode,
{
op->type = X86_VAR_REG;
op->reg = R_EAX;
- op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, 0,
+ /* Since reg is always AX, REX prefix has no impact. */
+ op->ptr = get_reg_ref(env, op->reg, false, 0,
decode->operand_size);
}
@@ -1687,40 +1688,37 @@ calc_addr:
}
}
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex, int is_extended,
- int size)
+target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
{
target_ulong ptr = 0;
- int which = 0;
if (is_extended) {
reg |= R_R8;
}
-
switch (size) {
case 1:
- if (is_extended || reg < 4 || rex) {
- which = 1;
+ if (is_extended || reg < 4 || rex_present) {
ptr = (target_ulong)&RL(env, reg);
} else {
- which = 2;
ptr = (target_ulong)&RH(env, reg - 4);
}
break;
default:
- which = 3;
ptr = (target_ulong)&RRX(env, reg);
break;
}
return ptr;
}
-target_ulong get_reg_val(CPUX86State *env, int reg, int rex, int is_extended,
- int size)
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
{
target_ulong val = 0;
- memcpy(&val, (void *)get_reg_ref(env, reg, rex, is_extended, size), size);
+ memcpy(&val,
+ (void *)get_reg_ref(env, reg, rex_present, is_extended, size),
+ size);
return val;
}
@@ -1853,28 +1851,38 @@ void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
{
while (1) {
+ /*
+ * REX prefix must come after legacy prefixes.
+ * REX before legacy is ignored.
+ * Clear rex to simulate this.
+ */
uint8_t byte = decode_byte(env, decode);
switch (byte) {
case PREFIX_LOCK:
decode->lock = byte;
+ decode->rex.rex = 0;
break;
case PREFIX_REPN:
case PREFIX_REP:
decode->rep = byte;
+ decode->rex.rex = 0;
break;
- case PREFIX_CS_SEG_OVEERIDE:
- case PREFIX_SS_SEG_OVEERIDE:
- case PREFIX_DS_SEG_OVEERIDE:
- case PREFIX_ES_SEG_OVEERIDE:
- case PREFIX_FS_SEG_OVEERIDE:
- case PREFIX_GS_SEG_OVEERIDE:
+ case PREFIX_CS_SEG_OVERRIDE:
+ case PREFIX_SS_SEG_OVERRIDE:
+ case PREFIX_DS_SEG_OVERRIDE:
+ case PREFIX_ES_SEG_OVERRIDE:
+ case PREFIX_FS_SEG_OVERRIDE:
+ case PREFIX_GS_SEG_OVERRIDE:
decode->segment_override = byte;
+ decode->rex.rex = 0;
break;
case PREFIX_OP_SIZE_OVERRIDE:
decode->op_size_override = byte;
+ decode->rex.rex = 0;
break;
case PREFIX_ADDR_SIZE_OVERRIDE:
decode->addr_size_override = byte;
+ decode->rex.rex = 0;
break;
case PREFIX_REX ... (PREFIX_REX + 0xf):
if (x86_is_long_mode(env_cpu(env))) {
@@ -2111,14 +2119,14 @@ void init_decoder()
{
int i;
- for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
- memcpy(_decode_tbl1, &invl_inst, sizeof(invl_inst));
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) {
+ memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));
}
for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
- memcpy(_decode_tbl2, &invl_inst, sizeof(invl_inst));
+ memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));
}
for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {
- memcpy(_decode_tbl3, &invl_inst, sizeof(invl_inst_x87));
+ memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));
}
for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {
@@ -2167,22 +2175,22 @@ target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
target_ulong addr, X86Seg seg)
{
switch (decode->segment_override) {
- case PREFIX_CS_SEG_OVEERIDE:
+ case PREFIX_CS_SEG_OVERRIDE:
seg = R_CS;
break;
- case PREFIX_SS_SEG_OVEERIDE:
+ case PREFIX_SS_SEG_OVERRIDE:
seg = R_SS;
break;
- case PREFIX_DS_SEG_OVEERIDE:
+ case PREFIX_DS_SEG_OVERRIDE:
seg = R_DS;
break;
- case PREFIX_ES_SEG_OVEERIDE:
+ case PREFIX_ES_SEG_OVERRIDE:
seg = R_ES;
break;
- case PREFIX_FS_SEG_OVEERIDE:
+ case PREFIX_FS_SEG_OVERRIDE:
seg = R_FS;
break;
- case PREFIX_GS_SEG_OVEERIDE:
+ case PREFIX_GS_SEG_OVERRIDE:
seg = R_GS;
break;
default:
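
A worked byte-sequence example of the prefix rule described in the
decode_prefix() comment above; the encodings follow the usual x86-64 rules and
are not taken from the patch:

    /*
     * 66 48 89 c8   66h comes first, then REX.W; REX.W is the last prefix,
     *               so it wins over the operand-size override -> mov rax, rcx
     * 48 66 89 c8   REX.W is followed by a legacy 66h prefix, so the REX
     *               byte is ignored and operands are 16-bit   -> mov ax, cx
     * Clearing decode->rex.rex whenever a legacy prefix is consumed models
     * the second case.
     */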
diff --git a/target/i386/hvf/x86_decode.h b/target/i386/hvf/x86_decode.h
index bc574a7..ef79601 100644
--- a/target/i386/hvf/x86_decode.h
+++ b/target/i386/hvf/x86_decode.h
@@ -27,12 +27,12 @@ typedef enum x86_prefix {
PREFIX_REPN = 0xf2,
PREFIX_REP = 0xf3,
/* group 2 */
- PREFIX_CS_SEG_OVEERIDE = 0x2e,
- PREFIX_SS_SEG_OVEERIDE = 0x36,
- PREFIX_DS_SEG_OVEERIDE = 0x3e,
- PREFIX_ES_SEG_OVEERIDE = 0x26,
- PREFIX_FS_SEG_OVEERIDE = 0x64,
- PREFIX_GS_SEG_OVEERIDE = 0x65,
+ PREFIX_CS_SEG_OVERRIDE = 0x2e,
+ PREFIX_SS_SEG_OVERRIDE = 0x36,
+ PREFIX_DS_SEG_OVERRIDE = 0x3e,
+ PREFIX_ES_SEG_OVERRIDE = 0x26,
+ PREFIX_FS_SEG_OVERRIDE = 0x64,
+ PREFIX_GS_SEG_OVERRIDE = 0x65,
/* group 3 */
PREFIX_OP_SIZE_OVERRIDE = 0x66,
/* group 4 */
@@ -303,10 +303,10 @@ uint64_t sign(uint64_t val, int size);
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex, int is_extended,
- int size);
-target_ulong get_reg_val(CPUX86State *env, int reg, int rex, int is_extended,
- int size);
+target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op);
target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
index 1b04bd7..3df7672 100644
--- a/target/i386/hvf/x86_emu.c
+++ b/target/i386/hvf/x86_emu.c
@@ -772,9 +772,6 @@ void simulate_wrmsr(struct CPUState *cpu)
switch (msr) {
case MSR_IA32_TSC:
- /* if (!osx_is_sierra())
- wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());
- hv_vm_sync_tsc(data);*/
break;
case MSR_IA32_APICBASE:
cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index e0ea02d..1485b95 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -152,10 +152,6 @@ void hvf_put_msrs(CPUState *cpu_state)
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
-
- /* if (!osx_is_sierra())
- wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
- hv_vm_sync_tsc(env->tsc);
}
diff --git a/tests/migration-test.c b/tests/migration-test.c
index ac780df..ebd77a5 100644
--- a/tests/migration-test.c
+++ b/tests/migration-test.c
@@ -614,7 +614,7 @@ static int test_migrate_start(QTestState **from, QTestState **to,
end_address = S390_TEST_MEM_END;
} else if (strcmp(arch, "ppc64") == 0) {
extra_opts = use_shmem ? get_shmem_opts("256M", shmem_path) : NULL;
- cmd_src = g_strdup_printf("-machine accel=%s -m 256M -nodefaults"
+ cmd_src = g_strdup_printf("-machine accel=%s,vsmt=8 -m 256M -nodefaults"
" -name source,debug-threads=on"
" -serial file:%s/src_serial"
" -prom-env 'use-nvramrc?=true' -prom-env "
@@ -623,7 +623,7 @@ static int test_migrate_start(QTestState **from, QTestState **to,
"until' %s %s", accel, tmpfs, end_address,
start_address, extra_opts ? extra_opts : "",
opts_src);
- cmd_dst = g_strdup_printf("-machine accel=%s -m 256M"
+ cmd_dst = g_strdup_printf("-machine accel=%s,vsmt=8 -m 256M"
" -name target,debug-threads=on"
" -serial file:%s/dest_serial"
" -incoming %s %s %s",
diff --git a/tests/pxe-test.c b/tests/pxe-test.c
index 948b0fb..aaae54f 100644
--- a/tests/pxe-test.c
+++ b/tests/pxe-test.c
@@ -46,15 +46,15 @@ static testdef_t x86_tests_slow[] = {
static testdef_t ppc64_tests[] = {
{ "pseries", "spapr-vlan",
- "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken" },
+ "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,vsmt=8" },
{ "pseries", "virtio-net-pci",
- "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken" },
+ "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,vsmt=8" },
{ NULL },
};
static testdef_t ppc64_tests_slow[] = {
{ "pseries", "e1000",
- "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken" },
+ "-machine cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,vsmt=8" },
{ NULL },
};
diff --git a/tests/qemu-iotests/169 b/tests/qemu-iotests/169
index 8c204ca..9656a7f 100755
--- a/tests/qemu-iotests/169
+++ b/tests/qemu-iotests/169
@@ -134,7 +134,7 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
self.check_bitmap(self.vm_a, sha256 if persistent else False)
def do_test_migration(self, persistent, migrate_bitmaps, online,
- shared_storage):
+ shared_storage, pre_shutdown):
granularity = 512
# regions = ((start, count), ...)
@@ -142,15 +142,13 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
(0xf0000, 0x10000),
(0xa0201, 0x1000))
- should_migrate = migrate_bitmaps or persistent and shared_storage
+ should_migrate = \
+ (migrate_bitmaps and (persistent or not pre_shutdown)) or \
+ (persistent and shared_storage)
mig_caps = [{'capability': 'events', 'state': True}]
if migrate_bitmaps:
mig_caps.append({'capability': 'dirty-bitmaps', 'state': True})
- result = self.vm_a.qmp('migrate-set-capabilities',
- capabilities=mig_caps)
- self.assert_qmp(result, 'return', {})
-
self.vm_b.add_incoming(incoming_cmd if online else "defer")
self.vm_b.add_drive(disk_a if shared_storage else disk_b)
@@ -166,6 +164,14 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase):
self.vm_a.hmp_qemu_io('drive0', 'write %d %d' % r)
sha256 = self.get_bitmap_hash(self.vm_a)
+ if pre_shutdown:
+ self.vm_a.shutdown()
+ self.vm_a.launch()
+
+ result = self.vm_a.qmp('migrate-set-capabilities',
+ capabilities=mig_caps)
+ self.assert_qmp(result, 'return', {})
+
result = self.vm_a.qmp('migrate', uri=mig_cmd)
while True:
event = self.vm_a.event_wait('MIGRATION')
@@ -210,11 +216,13 @@ def inject_test_case(klass, name, method, *args, **kwargs):
mc = operator.methodcaller(method, *args, **kwargs)
setattr(klass, 'test_' + method + name, lambda self: mc(self))
-for cmb in list(itertools.product((True, False), repeat=4)):
+for cmb in list(itertools.product((True, False), repeat=5)):
name = ('_' if cmb[0] else '_not_') + 'persistent_'
name += ('_' if cmb[1] else '_not_') + 'migbitmap_'
name += '_online' if cmb[2] else '_offline'
name += '_shared' if cmb[3] else '_nonshared'
+ if (cmb[4]):
+ name += '__pre_shutdown'
inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration',
*list(cmb))
diff --git a/tests/qemu-iotests/169.out b/tests/qemu-iotests/169.out
index 3a89159..5c26d15 100644
--- a/tests/qemu-iotests/169.out
+++ b/tests/qemu-iotests/169.out
@@ -1,5 +1,5 @@
-....................
+....................................
----------------------------------------------------------------------
-Ran 20 tests
+Ran 36 tests
OK