-rw-r--r-- | cpu-all.h         |  2
-rw-r--r-- | cpu-defs.h        |  3
-rw-r--r-- | exec.c            |  6
-rw-r--r-- | hw/vmport.c       |  3
-rw-r--r-- | kvm-all.c         | 36
-rw-r--r-- | kvm.h             |  1
-rw-r--r-- | target-i386/kvm.c | 11
-rw-r--r-- | vl.c              |  4
8 files changed, 49 insertions, 17 deletions
diff --git a/cpu-all.h b/cpu-all.h
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -915,6 +915,8 @@
 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
 
+void qemu_flush_coalesced_mmio_buffer(void);
+
 /*******************************************/
 /* host CPU ticks (if available) */
 
diff --git a/cpu-defs.h b/cpu-defs.h
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -197,6 +197,7 @@ typedef struct CPUWatchpoint {
     const char *cpu_model_str;                                  \
     struct KVMState *kvm_state;                                 \
     struct kvm_run *kvm_run;                                    \
-    int kvm_fd;
+    int kvm_fd;                                                 \
+    int kvm_vcpu_dirty;
 
 #endif
 
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -2415,6 +2415,12 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
         kvm_uncoalesce_mmio_region(addr, size);
 }
 
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+    if (kvm_enabled())
+        kvm_flush_coalesced_mmio_buffer();
+}
+
 ram_addr_t qemu_ram_alloc(ram_addr_t size)
 {
     RAMBlock *new_block;
diff --git a/hw/vmport.c b/hw/vmport.c
index 884af3f..6c9d7c9 100644
--- a/hw/vmport.c
+++ b/hw/vmport.c
@@ -25,6 +25,7 @@
 #include "isa.h"
 #include "pc.h"
 #include "sysemu.h"
+#include "kvm.h"
 
 //#define VMPORT_DEBUG
 
@@ -58,6 +59,8 @@ static uint32_t vmport_ioport_read(void *opaque, uint32_t addr)
     unsigned char command;
     uint32_t eax;
 
+    cpu_synchronize_state(env);
+
     eax = env->regs[R_EAX];
     if (eax != VMPORT_MAGIC)
         return eax;
diff --git a/kvm-all.c b/kvm-all.c
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -57,8 +57,10 @@ struct KVMState
     KVMSlot slots[32];
     int fd;
     int vmfd;
-    int regs_modified;
     int coalesced_mmio;
+#ifdef KVM_CAP_COALESCED_MMIO
+    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+#endif
     int broken_set_mem_region;
     int migration_log;
     int vcpu_events;
@@ -200,6 +202,12 @@ int kvm_init_vcpu(CPUState *env)
         goto err;
     }
 
+#ifdef KVM_CAP_COALESCED_MMIO
+    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
+        s->coalesced_mmio_ring = (void *) env->kvm_run +
+                s->coalesced_mmio * PAGE_SIZE;
+#endif
+
     ret = kvm_arch_init_vcpu(env);
     if (ret == 0) {
         qemu_register_reset(kvm_reset_vcpu, env);
@@ -466,10 +474,10 @@ int kvm_init(int smp_cpus)
         goto err;
     }
 
+    s->coalesced_mmio = 0;
 #ifdef KVM_CAP_COALESCED_MMIO
     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
-#else
-    s->coalesced_mmio = 0;
+    s->coalesced_mmio_ring = NULL;
 #endif
 
     s->broken_set_mem_region = 1;
@@ -544,14 +552,12 @@ static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
     return 1;
 }
 
-static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
+void kvm_flush_coalesced_mmio_buffer(void)
 {
 #ifdef KVM_CAP_COALESCED_MMIO
     KVMState *s = kvm_state;
-    if (s->coalesced_mmio) {
-        struct kvm_coalesced_mmio_ring *ring;
-
-        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
+    if (s->coalesced_mmio_ring) {
+        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
 
         while (ring->first != ring->last) {
             struct kvm_coalesced_mmio *ent;
@@ -567,9 +573,9 @@ static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
 
 void kvm_cpu_synchronize_state(CPUState *env)
 {
-    if (!env->kvm_state->regs_modified) {
+    if (!env->kvm_vcpu_dirty) {
         kvm_arch_get_registers(env);
-        env->kvm_state->regs_modified = 1;
+        env->kvm_vcpu_dirty = 1;
     }
 }
 
@@ -587,9 +593,9 @@ int kvm_cpu_exec(CPUState *env)
             break;
         }
 
-        if (env->kvm_state->regs_modified) {
+        if (env->kvm_vcpu_dirty) {
             kvm_arch_put_registers(env);
-            env->kvm_state->regs_modified = 0;
+            env->kvm_vcpu_dirty = 0;
         }
 
         kvm_arch_pre_run(env, run);
@@ -609,7 +615,7 @@ int kvm_cpu_exec(CPUState *env)
             abort();
         }
 
-        kvm_run_coalesced_mmio(env, run);
+        kvm_flush_coalesced_mmio_buffer();
 
         ret = 0; /* exit loop */
         switch (run->exit_reason) {
@@ -939,9 +945,9 @@ static void kvm_invoke_set_guest_debug(void *data)
     struct kvm_set_guest_debug_data *dbg_data = data;
     CPUState *env = dbg_data->env;
 
-    if (env->kvm_state->regs_modified) {
+    if (env->kvm_vcpu_dirty) {
         kvm_arch_put_registers(env);
-        env->kvm_state->regs_modified = 0;
+        env->kvm_vcpu_dirty = 0;
     }
 
     dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
 }
diff --git a/kvm.h b/kvm.h
--- a/kvm.h
+++ b/kvm.h
@@ -53,6 +53,7 @@ void kvm_setup_guest_memory(void *start, size_t size);
 
 int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
 int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
+void kvm_flush_coalesced_mmio_buffer(void);
 
 int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                           target_ulong len, int type);
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 5b093ce..0d08cd5 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -99,12 +99,18 @@ uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
             break;
         case R_EDX:
             ret = cpuid->entries[i].edx;
-            if (function == 0x80000001) {
+            switch (function) {
+            case 1:
+                /* KVM before 2.6.30 misreports the following features */
+                ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+                break;
+            case 0x80000001:
                 /* On Intel, kvm returns cpuid according to the Intel spec,
                  * so add missing bits according to the AMD spec:
                  */
                 cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                 ret |= cpuid_1_edx & 0xdfeff7ff;
+                break;
             }
             break;
         }
@@ -794,6 +800,9 @@ static int kvm_put_vcpu_events(CPUState *env)
 
     events.sipi_vector = env->sipi_vector;
 
+    events.flags =
+        KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+
     return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
 #else
     return 0;
diff --git a/vl.c b/vl.c
--- a/vl.c
+++ b/vl.c
@@ -2996,6 +2996,7 @@ static void gui_update(void *opaque)
     DisplayState *ds = opaque;
    DisplayChangeListener *dcl = ds->listeners;
 
+    qemu_flush_coalesced_mmio_buffer();
     dpy_refresh(ds);
 
     while (dcl != NULL) {
@@ -3011,6 +3012,7 @@ static void nographic_update(void *opaque)
 {
     uint64_t interval = GUI_REFRESH_INTERVAL;
 
+    qemu_flush_coalesced_mmio_buffer();
     qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock));
 }
 
@@ -3280,6 +3282,8 @@ static int cpu_can_run(CPUState *env)
         return 0;
    if (env->stopped)
         return 0;
+    if (!vm_running)
+        return 0;
     return 1;
 }
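For reference, the kvm-all.c hunk above is truncated right after the ring-walk begins, so the actual drain loop of kvm_flush_coalesced_mmio_buffer() is not shown in this diff. The idea is that the kernel appends deferred MMIO writes to the shared kvm_coalesced_mmio_ring (mapped just after kvm_run, as set up in kvm_init_vcpu()), and userspace replays each entry into the guest address space before advancing the ring's first index. The following is only a sketch of that loop, not the patch's code: the helper name flush_coalesced_ring is hypothetical, while the ring/entry layout and KVM_COALESCED_MMIO_MAX come from the kernel's linux/kvm.h ABI and cpu_physical_memory_write() is QEMU's existing helper.

/* Sketch only: replay MMIO writes queued by KVM in the coalesced ring.
 * Intended to run in kvm-all.c context (linux/kvm.h, cpu.h available). */
static void flush_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
{
    while (ring->first != ring->last) {
        struct kvm_coalesced_mmio *ent = &ring->coalesced_mmio[ring->first];

        /* Perform the deferred write into the guest physical address space. */
        cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);

        /* Advance first; the kernel only reuses a slot once userspace has
         * consumed it, so no locking is needed on this single-consumer side. */
        ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
    }
}

This is also why vl.c now calls qemu_flush_coalesced_mmio_buffer() from the periodic display timers: coalesced writes must be drained regularly even when no KVM exit forces a flush.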