author     Anthony Liguori <aliguori@us.ibm.com>   2011-08-22 08:24:58 -0500
committer  Anthony Liguori <aliguori@us.ibm.com>   2011-09-02 10:34:55 -0500
commit     12d4536f7d911b6d87a766ad7300482ea663cea2
tree       848fe9cb11b82145fae05ee05aace4f90a3564af /qemu-timer.c
parent     d9cd446b4f6ff464f9520898116534de988d9bc1
main: force enabling of I/O thread
Enabling the I/O thread by default seems like an important part of declaring
1.0. Besides allowing true SMP support with KVM, the I/O thread means that the
TCG VCPU doesn't have to multiplex itself with the I/O dispatch routines, which
currently requires a (racy) signal-based alarm system.
I know there have been concerns about performance. I think the ones that have
come up so far (virtio-net) are most likely due to secondary effects such as
decreased batching.
I think we ought to force enabling I/O thread early in 1.0 development and
commit to resolving any lingering issues.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
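
The commit message's claim is architectural: once device emulation runs in its
own thread, guest execution no longer has to be interrupted by alarm-signal
ticks just so the main loop can poll file descriptors. The sketch below is a
minimal illustration of that model, not QEMU source; io_thread_fn(),
run_vcpu_step() and dispatch_io() are hypothetical placeholders standing in for
QEMU's real event loop and TCG execution loop.

/*
 * Standalone sketch (not QEMU code) of the threading model the commit adopts:
 * a dedicated I/O thread blocks in select() and dispatches fd events, while
 * the "VCPU" thread runs guest work without being interrupted by a
 * SIGALRM-style timer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>

static int event_fds[2];                 /* pipe standing in for device fds */
static atomic_bool running = true;

static void dispatch_io(int fd)          /* placeholder for fd handlers */
{
    char buf[64];
    if (read(fd, buf, sizeof(buf)) < 0) {
        perror("read");
    }
}

static void *io_thread_fn(void *arg)
{
    (void)arg;
    while (atomic_load(&running)) {
        fd_set rfds;
        struct timeval tv = { .tv_sec = 0, .tv_usec = 100 * 1000 };
        FD_ZERO(&rfds);
        FD_SET(event_fds[0], &rfds);
        if (select(event_fds[0] + 1, &rfds, NULL, NULL, &tv) > 0) {
            dispatch_io(event_fds[0]);
        }
    }
    return NULL;
}

static void run_vcpu_step(void)          /* placeholder for TCG execution */
{
    usleep(1000);
}

int main(void)
{
    pthread_t io_thread;

    if (pipe(event_fds) < 0) {
        return 1;
    }
    pthread_create(&io_thread, NULL, io_thread_fn, NULL);

    /* The "VCPU" loop never has to break out to poll for I/O itself. */
    for (int i = 0; i < 100; i++) {
        run_vcpu_step();
        if (i % 25 == 0) {
            /* Simulate a device raising an event for the I/O thread. */
            if (write(event_fds[1], "x", 1) < 0) {
                perror("write");
            }
        }
    }

    atomic_store(&running, false);
    pthread_join(io_thread, NULL);
    return 0;
}

Build with something like cc -pthread sketch.c; the only point is that the
VCPU loop and the select() loop run concurrently instead of taking turns in a
single thread paced by an alarm signal.
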
Diffstat (limited to 'qemu-timer.c')
-rw-r--r--   qemu-timer.c   53
1 file changed, 0 insertions, 53 deletions
diff --git a/qemu-timer.c b/qemu-timer.c
index 19313d3..46dd483 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -101,22 +101,6 @@ static int64_t cpu_get_clock(void)
     }
 }

-#ifndef CONFIG_IOTHREAD
-static int64_t qemu_icount_delta(void)
-{
-    if (!use_icount) {
-        return 5000 * (int64_t) 1000000;
-    } else if (use_icount == 1) {
-        /* When not using an adaptive execution frequency
-           we tend to get badly out of sync with real time,
-           so just delay for a reasonable amount of time. */
-        return 0;
-    } else {
-        return cpu_get_icount() - cpu_get_clock();
-    }
-}
-#endif
-
 /* enable cpu_get_ticks() */
 void cpu_enable_ticks(void)
 {
@@ -688,9 +672,7 @@ void configure_icount(const char *option)
     if (!option)
         return;

-#ifdef CONFIG_IOTHREAD
     vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
-#endif

     if (strcmp(option, "auto") != 0) {
         icount_time_shift = strtol(option, NULL, 0);
@@ -1178,41 +1160,6 @@ void quit_timers(void)

 int qemu_calculate_timeout(void)
 {
-#ifndef CONFIG_IOTHREAD
-    int timeout;
-
-    if (!vm_running)
-        timeout = 5000;
-    else {
-        /* XXX: use timeout computed from timers */
-        int64_t add;
-        int64_t delta;
-        /* Advance virtual time to the next event. */
-        delta = qemu_icount_delta();
-        if (delta > 0) {
-            /* If virtual time is ahead of real time then just
-               wait for IO. */
-            timeout = (delta + 999999) / 1000000;
-        } else {
-            /* Wait for either IO to occur or the next
-               timer event. */
-            add = qemu_next_icount_deadline();
-            /* We advance the timer before checking for IO.
-               Limit the amount we advance so that early IO
-               activity won't get the guest too far ahead. */
-            if (add > 10000000)
-                add = 10000000;
-            delta += add;
-            qemu_icount += qemu_icount_round (add);
-            timeout = delta / 1000000;
-            if (timeout < 0)
-                timeout = 0;
-        }
-    }
-
-    return timeout;
-#else /* CONFIG_IOTHREAD */
     return 1000;
-#endif
 }
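
For context on what the last hunk removes: in the non-CONFIG_IOTHREAD build,
qemu_calculate_timeout() converted the nanosecond gap between virtual (icount)
time and real time into a millisecond poll timeout, rounding up so the wait
never undershot. The snippet below illustrates that arithmetic only and is not
QEMU code; ns_delta_to_ms_timeout() is a hypothetical helper that mirrors just
the delta > 0 branch, and the delta <= 0 case (which the deleted code handled
by advancing icount toward the next deadline) is simplified to zero here.

/*
 * Illustration only: reproduce the nanosecond-to-millisecond rounding used
 * by the deleted non-iothread timeout path.
 */
#include <stdint.h>
#include <stdio.h>

static int ns_delta_to_ms_timeout(int64_t delta_ns)
{
    if (delta_ns <= 0) {
        /* The deleted code advanced icount here; simplified to "no wait". */
        return 0;
    }
    /* Same rounding as the deleted line: (delta + 999999) / 1000000. */
    return (int)((delta_ns + 999999) / 1000000);
}

int main(void)
{
    printf("%d\n", ns_delta_to_ms_timeout(2500000));  /* 2.5 ms lead -> 3 */
    printf("%d\n", ns_delta_to_ms_timeout(-1));       /* behind -> 0 */
    return 0;
}

With CONFIG_IOTHREAD forced on, none of this bookkeeping remains:
qemu_calculate_timeout() simply returns 1000 and the dedicated I/O thread does
the waiting.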