From d645e1328726b38b3c79525eb57842ce29c1df7c Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 3 Jun 2019 20:04:08 -0300 Subject: kvm: i386: halt poll control MSR support Add support for halt poll control MSR: save/restore, migration and new feature name. The purpose of this MSR is to allow the guest to disable host halt poll. Signed-off-by: Marcelo Tosatti Message-Id: <20190603230408.GA7938@amt.cnet> [Do not enable by default, as pointed out by Mark Kanda. - Paolo] Signed-off-by: Paolo Bonzini --- include/standard-headers/asm-x86/kvm_para.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/standard-headers/asm-x86/kvm_para.h b/include/standard-headers/asm-x86/kvm_para.h index 35cd8d6..e171514 100644 --- a/include/standard-headers/asm-x86/kvm_para.h +++ b/include/standard-headers/asm-x86/kvm_para.h @@ -29,6 +29,7 @@ #define KVM_FEATURE_PV_TLB_FLUSH 9 #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 #define KVM_FEATURE_PV_SEND_IPI 11 +#define KVM_FEATURE_POLL_CONTROL 12 #define KVM_HINTS_REALTIME 0 @@ -47,6 +48,7 @@ #define MSR_KVM_ASYNC_PF_EN 0x4b564d02 #define MSR_KVM_STEAL_TIME 0x4b564d03 #define MSR_KVM_PV_EOI_EN 0x4b564d04 +#define MSR_KVM_POLL_CONTROL 0x4b564d05 struct kvm_steal_time { uint64_t steal; -- cgit v1.1 From fef28891aa401e8f9d048c65f32067f51d695f4e Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Wed, 24 Jul 2019 16:31:03 +0200 Subject: loader: Handle memory-mapped ELFs This patch allows handling an ELF memory-mapped, taking care the reference count of the GMappedFile* passed through rom_add_elf_program(). In this case, the 'data' pointer is not heap-allocated, so we cannot free it. Suggested-by: Paolo Bonzini Signed-off-by: Stefano Garzarella Message-Id: <20190724143105.307042-2-sgarzare@redhat.com> Signed-off-by: Paolo Bonzini --- include/hw/elf_ops.h | 2 +- include/hw/loader.h | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index 690f923..fede37e 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -525,7 +525,7 @@ static int glue(load_elf, SZ)(const char *name, int fd, snprintf(label, sizeof(label), "phdr #%d: %s", i, name); /* rom_add_elf_program() seize the ownership of 'data' */ - rom_add_elf_program(label, data, file_size, mem_size, + rom_add_elf_program(label, NULL, data, file_size, mem_size, addr, as); } else { address_space_write(as ? as : &address_space_memory, diff --git a/include/hw/loader.h b/include/hw/loader.h index 3e1b3a4..07fd928 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -258,8 +258,9 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, FWCfgCallback fw_callback, void *callback_opaque, AddressSpace *as, bool read_only); -int rom_add_elf_program(const char *name, void *data, size_t datasize, - size_t romsize, hwaddr addr, AddressSpace *as); +int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data, + size_t datasize, size_t romsize, hwaddr addr, + AddressSpace *as); int rom_check_and_register_reset(void); void rom_set_fw(FWCfgState *f); void rom_set_order_override(int order); -- cgit v1.1 From 816b9fe450220e19acb91a0ce4a8ade7000648d1 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Wed, 24 Jul 2019 16:31:04 +0200 Subject: elf-ops.h: Map into memory the ELF to load In order to reduce the memory footprint we map into memory the ELF to load using g_mapped_file_new_from_fd() instead of reading each sections. 
In this way we can share the ELF pages between multiple instances of QEMU. Suggested-by: Dr. David Alan Gilbert Suggested-by: Paolo Bonzini Signed-off-by: Stefano Garzarella Message-Id: <20190724143105.307042-3-sgarzare@redhat.com> Signed-off-by: Paolo Bonzini --- include/hw/elf_ops.h | 71 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index fede37e..1496d7e 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -323,8 +323,9 @@ static int glue(load_elf, SZ)(const char *name, int fd, struct elfhdr ehdr; struct elf_phdr *phdr = NULL, *ph; int size, i, total_size; - elf_word mem_size, file_size; + elf_word mem_size, file_size, data_offset; uint64_t addr, low = (uint64_t)-1, high = 0; + GMappedFile *mapped_file = NULL; uint8_t *data = NULL; char label[128]; int ret = ELF_LOAD_FAILED; @@ -409,20 +410,32 @@ static int glue(load_elf, SZ)(const char *name, int fd, } } + /* + * Since we want to be able to modify the mapped buffer, we set the + * 'writeble' parameter to 'true'. Modifications to the buffer are not + * written back to the file. + */ + mapped_file = g_mapped_file_new_from_fd(fd, true, NULL); + if (!mapped_file) { + goto fail; + } + total_size = 0; for(i = 0; i < ehdr.e_phnum; i++) { ph = &phdr[i]; if (ph->p_type == PT_LOAD) { mem_size = ph->p_memsz; /* Size of the ROM */ file_size = ph->p_filesz; /* Size of the allocated data */ - data = g_malloc0(file_size); - if (ph->p_filesz > 0) { - if (lseek(fd, ph->p_offset, SEEK_SET) < 0) { - goto fail; - } - if (read(fd, data, file_size) != file_size) { + data_offset = ph->p_offset; /* Offset where the data is located */ + + if (file_size > 0) { + if (g_mapped_file_get_length(mapped_file) < + file_size + data_offset) { goto fail; } + + data = (uint8_t *)g_mapped_file_get_contents(mapped_file); + data += data_offset; } /* The ELF spec is somewhat vague about the purpose of the @@ -513,25 +526,25 @@ static int glue(load_elf, SZ)(const char *name, int fd, *pentry = ehdr.e_entry - ph->p_vaddr + ph->p_paddr; } - if (mem_size == 0) { - /* Some ELF files really do have segments of zero size; - * just ignore them rather than trying to create empty - * ROM blobs, because the zero-length blob can falsely - * trigger the overlapping-ROM-blobs check. - */ - g_free(data); - } else { + /* Some ELF files really do have segments of zero size; + * just ignore them rather than trying to create empty + * ROM blobs, because the zero-length blob can falsely + * trigger the overlapping-ROM-blobs check. + */ + if (mem_size != 0) { if (load_rom) { snprintf(label, sizeof(label), "phdr #%d: %s", i, name); - /* rom_add_elf_program() seize the ownership of 'data' */ - rom_add_elf_program(label, NULL, data, file_size, mem_size, - addr, as); + /* + * rom_add_elf_program() takes its own reference to + * 'mapped_file'. + */ + rom_add_elf_program(label, mapped_file, data, file_size, + mem_size, addr, as); } else { address_space_write(as ? 
as : &address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, data, file_size); - g_free(data); } } @@ -547,14 +560,16 @@ static int glue(load_elf, SZ)(const char *name, int fd, struct elf_note *nhdr = NULL; file_size = ph->p_filesz; /* Size of the range of ELF notes */ - data = g_malloc0(file_size); - if (ph->p_filesz > 0) { - if (lseek(fd, ph->p_offset, SEEK_SET) < 0) { - goto fail; - } - if (read(fd, data, file_size) != file_size) { + data_offset = ph->p_offset; /* Offset where the notes are located */ + + if (file_size > 0) { + if (g_mapped_file_get_length(mapped_file) < + file_size + data_offset) { goto fail; } + + data = (uint8_t *)g_mapped_file_get_contents(mapped_file); + data += data_offset; } /* @@ -570,19 +585,17 @@ static int glue(load_elf, SZ)(const char *name, int fd, sizeof(struct elf_note) == sizeof(struct elf64_note); elf_note_fn((void *)nhdr, (void *)&ph->p_align, is64); } - g_free(data); data = NULL; } } - g_free(phdr); if (lowaddr) *lowaddr = (uint64_t)(elf_sword)low; if (highaddr) *highaddr = (uint64_t)(elf_sword)high; - return total_size; + ret = total_size; fail: - g_free(data); + g_mapped_file_unref(mapped_file); g_free(phdr); return ret; } -- cgit v1.1 From e502fe96ac4343a3f4a3c13f28eea03ae7b11c3f Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Wed, 24 Jul 2019 16:31:05 +0200 Subject: hw/i386/pc: Map into memory the initrd In order to reduce the memory footprint we map into memory the initrd using g_mapped_file_new() instead of reading it. In this way we can share the initrd pages between multiple instances of QEMU. Suggested-by: Paolo Bonzini Signed-off-by: Stefano Garzarella Message-Id: <20190724143105.307042-4-sgarzare@redhat.com> Signed-off-by: Paolo Bonzini --- include/hw/i386/pc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 4bb9e29..2afe285 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -41,6 +41,7 @@ struct PCMachineState { FWCfgState *fw_cfg; qemu_irq *gsi; PFlashCFI01 *flash[2]; + GMappedFile *initrd_mapped_file; /* Configuration options: */ uint64_t max_ram_below_4g; -- cgit v1.1 From 52bf9771fdfce98e98cea36a17a18915be6f6b7f Mon Sep 17 00:00:00 2001 From: "tony.nguyen@bt.com" Date: Thu, 18 Jul 2019 06:01:31 +0000 Subject: configure: Define target access alignment in configure This patch moves the define of target access alignment earlier from target/foo/cpu.h to configure. Suggested in Richard Henderson's reply to "[PATCH 1/4] tcg: TCGMemOp is now accelerator independent MemOp" Signed-off-by: Tony Nguyen Message-Id: <11e818d38ebc40e986cfa62dd7d0afdc@tpw09926dag18e.domain1.systemhost.net> Signed-off-by: Paolo Bonzini Signed-off-by: tony.nguyen@bt.com --- include/exec/poison.h | 1 + include/qom/cpu.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/exec/poison.h b/include/exec/poison.h index b862320..955eb86 100644 --- a/include/exec/poison.h +++ b/include/exec/poison.h @@ -35,6 +35,7 @@ #pragma GCC poison TARGET_UNICORE32 #pragma GCC poison TARGET_XTENSA +#pragma GCC poison TARGET_ALIGNED_ONLY #pragma GCC poison TARGET_HAS_BFLT #pragma GCC poison TARGET_NAME #pragma GCC poison TARGET_SUPPORTS_MTTCG diff --git a/include/qom/cpu.h b/include/qom/cpu.h index ddb91bb..77fca95 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -89,7 +89,7 @@ struct TranslationBlock; * @do_unassigned_access: Callback for unassigned access handling. 
* (this is deprecated: new targets should use do_transaction_failed instead) * @do_unaligned_access: Callback for unaligned access handling, if - * the target defines #ALIGNED_ONLY. + * the target defines #TARGET_ALIGNED_ONLY. * @do_transaction_failed: Callback for handling failed memory transactions * (ie bus faults or external aborts; not MMU faults) * @virtio_is_big_endian: Callback to return %true if a CPU which supports -- cgit v1.1 From 9458a9a1df1a4c719e24512394d548c1fc7abd22 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 6 Feb 2018 18:37:39 +0100 Subject: memory: fix race between TCG and accesses to dirty bitmap There is a race between TCG and accesses to the dirty log: vCPU thread reader thread ----------------------- ----------------------- TLB check -> slow path notdirty_mem_write write to RAM set dirty flag clear dirty flag TLB check -> fast path read memory write to RAM Fortunately, in order to fix it, no change is required to the vCPU thread. However, the reader thread must delay the read after the vCPU thread has finished the write. This can be approximated conservatively by run_on_cpu, which waits for the end of the current translation block. A similar technique is used by KVM, which has to do a synchronous TLB flush after doing a test-and-clear of the dirty-page flags. Reported-by: Dr. David Alan Gilbert Signed-off-by: Paolo Bonzini --- include/exec/memory.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/exec/memory.h b/include/exec/memory.h index d99eb25..fddc2ff 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -425,6 +425,7 @@ struct MemoryListener { void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section); void (*log_global_start)(MemoryListener *listener); void (*log_global_stop)(MemoryListener *listener); + void (*log_global_after_sync)(MemoryListener *listener); void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section, bool match_data, uint64_t data, EventNotifier *e); void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, @@ -1688,6 +1689,17 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, void memory_global_dirty_log_sync(void); /** + * memory_global_dirty_log_sync: synchronize the dirty log for all memory + * + * Synchronizes the vCPUs with a thread that is reading the dirty bitmap. + * This function must be called after the dirty log bitmap is cleared, and + * before dirty guest memory pages are read. If you are using + * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes + * care of doing this. + */ +void memory_global_after_dirty_log_sync(void); + +/** * memory_region_transaction_begin: Start a transaction. * * During a transaction, changes will be accumulated and made visible -- cgit v1.1 From 4ea9a0e3db39a3ef07f8a97c005733d127485891 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Wed, 24 Jul 2019 12:58:21 +0100 Subject: timer: Remove reset notifiers Remove the reset notifer from the core qemu-timer code. The only user was mc146818 and we've just remove it's use. Signed-off-by: Dr. 
David Alan Gilbert Message-Id: <20190724115823.4199-3-dgilbert@redhat.com> Signed-off-by: Paolo Bonzini --- include/qemu/timer.h | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'include') diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 5d978e1..6817c78 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -228,28 +228,6 @@ void qemu_clock_enable(QEMUClockType type, bool enabled); void qemu_start_warp_timer(void); /** - * qemu_clock_register_reset_notifier: - * @type: the clock type - * @notifier: the notifier function - * - * Register a notifier function to call when the clock - * concerned is reset. - */ -void qemu_clock_register_reset_notifier(QEMUClockType type, - Notifier *notifier); - -/** - * qemu_clock_unregister_reset_notifier: - * @type: the clock type - * @notifier: the notifier function - * - * Unregister a notifier function to call when the clock - * concerned is reset. - */ -void qemu_clock_unregister_reset_notifier(QEMUClockType type, - Notifier *notifier); - -/** * qemu_clock_run_timers: * @type: clock on which to operate * -- cgit v1.1 From 3c2d4c8aa6a98366c9fe2f36305f12199257a7d5 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Wed, 24 Jul 2019 12:58:23 +0100 Subject: timer: last, remove last bits of last The reset notifiers kept a 'last' counter to notice jumps; now that we've remove the notifier we don't need to keep 'last'. Signed-off-by: Dr. David Alan Gilbert Message-Id: <20190724115823.4199-5-dgilbert@redhat.com> Signed-off-by: Paolo Bonzini --- include/qemu/timer.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'include') diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 6817c78..5bcab93 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -248,19 +248,6 @@ bool qemu_clock_run_timers(QEMUClockType type); */ bool qemu_clock_run_all_timers(void); -/** - * qemu_clock_get_last: - * - * Returns last clock query time. - */ -uint64_t qemu_clock_get_last(QEMUClockType type); -/** - * qemu_clock_set_last: - * - * Sets last clock query time. - */ -void qemu_clock_set_last(QEMUClockType type, uint64_t last); - /* * QEMUTimerList -- cgit v1.1 From dcb1578069dd072f9aec74e3024cadb9ed0f3aae Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Thu, 25 Jul 2019 11:44:26 +0300 Subject: util/qemu-timer: refactor deadline calculation for external timers icount-based record/replay uses qemu_clock_deadline_ns_all to measure the period until vCPU may be interrupted. This function takes in account the virtual timers, because they belong to the virtual devices that may generate interrupt request or affect the virtual machine state. However, there are a subset of virtual timers, that are marked with 'external' flag. These do not change the virtual machine state and only based on virtual clock. Calculating the deadling using the external timers breaks the determinism, because they do not belong to the replayed part of the virtual machine. This patch fixes the deadline calculation for this case by adding new parameter for skipping the external timers when it is needed. 
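For illustration only, a minimal sketch of how a record/replay caller could use the new attr_mask parameter to leave external timers out of the deadline. The wrapper name replay_safe_deadline_ns() is a hypothetical stand-in; qemu_clock_deadline_ns_all(), QEMU_CLOCK_VIRTUAL and the QEMU_TIMER_ATTR_* masks are the interfaces touched by this patch.

#include "qemu/timer.h"

/* Hypothetical helper, not part of the patch itself. */
static int64_t replay_safe_deadline_ns(void)
{
    /* Keep every timer attribute except the external ones... */
    int attr_mask = QEMU_TIMER_ATTR_ALL & ~QEMU_TIMER_ATTR_EXTERNAL;

    /* ...so timers outside the replayed machine cannot move the deadline. */
    return qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, attr_mask);
}
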
Signed-off-by: Pavel Dovgalyuk -- v2 changes: - added new parameter for timer attribute mask Message-Id: <156404426682.18669.17014100602930969222.stgit@pasha-Precision-3630-Tower> Signed-off-by: Paolo Bonzini --- include/qemu/timer.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 5bcab93..85bc6eb 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -62,13 +62,15 @@ typedef enum { * The following attributes are available: * * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem + * QEMU_TIMER_ATTR_ALL: mask for all existing attributes * * Timers with this attribute do not recorded in rr mode, therefore it could be * used for the subsystems that operate outside the guest core. Applicable only * with virtual clock type. */ -#define QEMU_TIMER_ATTR_EXTERNAL BIT(0) +#define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0)) +#define QEMU_TIMER_ATTR_ALL 0xffffffff typedef struct QEMUTimerList QEMUTimerList; @@ -177,6 +179,8 @@ bool qemu_clock_use_for_deadline(QEMUClockType type); /** * qemu_clock_deadline_ns_all: * @type: the clock type + * @attr_mask: mask for the timer attributes that are included + * in deadline calculation * * Calculate the deadline across all timer lists associated * with a clock (as opposed to just the default one) @@ -184,7 +188,7 @@ bool qemu_clock_use_for_deadline(QEMUClockType type); * * Returns: time until expiry in nanoseconds or -1 */ -int64_t qemu_clock_deadline_ns_all(QEMUClockType type); +int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask); /** * qemu_clock_get_main_loop_timerlist: -- cgit v1.1 From 13f267133f349f8a322b5385a58688f0dcdf9ed2 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Thu, 25 Jul 2019 11:44:43 +0300 Subject: replay: rename step-related variables and functions This patch renames replay_get_current_step() and related variables to make these names consistent with existing 'icount' command line option and future record/replay hmp/qmp commands. Signed-off-by: Pavel Dovgalyuk Message-Id: <156404428377.18669.15476429889039912070.stgit@pasha-Precision-3630-Tower> Signed-off-by: Paolo Bonzini --- include/sysemu/replay.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 2f2ccdb..d223edd 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -75,7 +75,7 @@ void replay_add_blocker(Error *reason); /* Processing the instructions */ /*! Returns number of executed instructions. */ -uint64_t replay_get_current_step(void); +uint64_t replay_get_current_icount(void); /*! Returns number of instructions to execute in replay mode. */ int replay_get_instructions(void); /*! Updates instructions counter in replay mode. */ -- cgit v1.1 From ba3e7926691ed33e1164fafbd4fb2e8e50e7c4cd Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Thu, 25 Jul 2019 11:44:49 +0300 Subject: icount: clean up cpu_can_io at the entry to the block Most of IO instructions can be executed only at the end of the block in icount mode. Therefore translator can set cpu_can_io flag when translating the last instruction. But when the blocks are chained, then this flag is not reset and may remain set at the beginning of the next block. This patch resets the flag at the entry of any translation block, making I/O operations impossible by default. 
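For illustration only, a sketch of the pattern a target translator follows for an I/O instruction once can_do_io is cleared at the entry of every translation block. This is a fragment meant to live inside a target's translate.c: translate_io_insn() and the DisasContext layout are hypothetical stand-ins, while gen_io_start(), tb_cflags() and CF_USE_ICOUNT are existing QEMU interfaces.

#include "exec/gen-icount.h"

/* Hypothetical target helper, not actual QEMU target code. */
static void translate_io_insn(DisasContext *s)
{
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        /* Permit I/O for this instruction, which must end the block. */
        gen_io_start();
    }

    /* ... emit the actual device load/store here ... */

    /*
     * No matching gen_io_end() is needed: this patch clears can_do_io at
     * the start of every block, and the follow-up patch below removes the
     * now-redundant gen_io_end() calls from the translators.
     */
}
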
Signed-off-by: Pavel Dovgalyuk -- v2 changes: - reset can_do_io at the start of every TB (suggested by Paolo Bonzini) Message-Id: <156404428943.18669.15747009371169578935.stgit@pasha-Precision-3630-Tower> Signed-off-by: Paolo Bonzini --- include/exec/gen-icount.h | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index f7669b6..4004e6c 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -7,6 +7,24 @@ static TCGOp *icount_start_insn; +static inline void gen_io_start(void) +{ + TCGv_i32 tmp = tcg_const_i32(1); + tcg_gen_st_i32(tmp, cpu_env, + offsetof(ArchCPU, parent_obj.can_do_io) - + offsetof(ArchCPU, env)); + tcg_temp_free_i32(tmp); +} + +static inline void gen_io_end(void) +{ + TCGv_i32 tmp = tcg_const_i32(0); + tcg_gen_st_i32(tmp, cpu_env, + offsetof(ArchCPU, parent_obj.can_do_io) - + offsetof(ArchCPU, env)); + tcg_temp_free_i32(tmp); +} + static inline void gen_tb_start(TranslationBlock *tb) { TCGv_i32 count, imm; @@ -40,6 +58,8 @@ static inline void gen_tb_start(TranslationBlock *tb) tcg_gen_st16_i32(count, cpu_env, offsetof(ArchCPU, neg.icount_decr.u16.low) - offsetof(ArchCPU, env)); + /* Disable I/O by default */ + gen_io_end(); } tcg_temp_free_i32(count); @@ -57,22 +77,4 @@ static inline void gen_tb_end(TranslationBlock *tb, int num_insns) tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); } -static inline void gen_io_start(void) -{ - TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_st_i32(tmp, cpu_env, - offsetof(ArchCPU, parent_obj.can_do_io) - - offsetof(ArchCPU, env)); - tcg_temp_free_i32(tmp); -} - -static inline void gen_io_end(void) -{ - TCGv_i32 tmp = tcg_const_i32(0); - tcg_gen_st_i32(tmp, cpu_env, - offsetof(ArchCPU, parent_obj.can_do_io) - - offsetof(ArchCPU, env)); - tcg_temp_free_i32(tmp); -} - #endif -- cgit v1.1 From 9e9b10c6491153b60ccfd021328f1f88e1669550 Mon Sep 17 00:00:00 2001 From: Pavel Dovgalyuk Date: Thu, 25 Jul 2019 11:44:55 +0300 Subject: icount: remove unnecessary gen_io_end calls Prior patch resets can_do_io flag at the TB entry. Therefore there is no need in resetting this flag at the end of the block. This patch removes redundant gen_io_end calls. Signed-off-by: Pavel Dovgalyuk Message-Id: <156404429499.18669.13404064982854123855.stgit@pasha-Precision-3630-Tower> Signed-off-by: Paolo Bonzini Signed-off-by: Pavel Dovgalyuk --- include/exec/gen-icount.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index 4004e6c..822c43c 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -16,6 +16,13 @@ static inline void gen_io_start(void) tcg_temp_free_i32(tmp); } +/* + * cpu->can_do_io is cleared automatically at the beginning of + * each translation block. The cost is minimal and only paid + * for -icount, plus it would be very easy to forget doing it + * in the translator. Therefore, backends only need to call + * gen_io_start. + */ static inline void gen_io_end(void) { TCGv_i32 tmp = tcg_const_i32(0); @@ -58,7 +65,6 @@ static inline void gen_tb_start(TranslationBlock *tb) tcg_gen_st16_i32(count, cpu_env, offsetof(ArchCPU, neg.icount_decr.u16.low) - offsetof(ArchCPU, env)); - /* Disable I/O by default */ gen_io_end(); } -- cgit v1.1