| author    | Andreas Färber <afaerber@suse.de> | 2012-03-14 01:38:32 +0100 |
|-----------|-----------------------------------|---------------------------|
| committer | Andreas Färber <afaerber@suse.de> | 2012-03-14 22:20:27 +0100 |
| commit    | 9349b4f9fda360f3d9adc4cf4443a1a9b429c17e | |
| tree      | eec784672b45df3b321a2724669ff80639555ce0 | |
| parent    | 5bfcb36ec49192cb22f45f4b7ae805c530a1fd9e | |
Rename CPUState -> CPUArchState
Scripted conversion:
for file in *.[hc] hw/*.[hc] hw/kvm/*.[hc] linux-user/*.[hc] linux-user/m68k/*.[hc] bsd-user/*.[hc] darwin-user/*.[hc] tcg/*/*.[hc] target-*/cpu.h; do
sed -i "s/CPUState/CPUArchState/g" $file
done
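Why a plain sed is enough here: "CPUState" was never a standalone structure in this tree but a per-target alias for that target's env struct, defined in each target's cpu.h (one of the globs in the loop above). A minimal sketch of the alias after the rename, using i386 names; the struct body and member names below are invented stand-ins, not literal QEMU source:

```c
/* Sketch only, not literal QEMU source.  Each per-target header
 * (e.g. target-i386/cpu.h) is assumed to alias the generic name to its
 * own env struct; the scripted rename just swaps the spelling of that
 * alias and of every declaration that refers to it. */
typedef struct CPUX86State {
    unsigned long eip;            /* stand-in members; the real struct   */
    unsigned long regs[8];        /* carries the full register file etc. */
} CPUX86State;

#define CPUArchState CPUX86State  /* previously: #define CPUState CPUX86State */

extern CPUArchState *first_cpu;   /* reads like the declarations in the diff below */
```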
All occurrences of CPUArchState are expected to be replaced by QOM CPUState,
once all targets are QOM'ified and common fields have been extracted.
Signed-off-by: Andreas Färber <afaerber@suse.de>
Reviewed-by: Anthony Liguori <aliguori@us.ibm.com>
59 files changed, 419 insertions, 419 deletions
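The commit message notes that CPUArchState is itself transitional: once every target is QOM'ified and the shared fields have been extracted, a common QOM CPUState object takes over. A rough, hypothetical sketch of that intended end state, using ARM-style names (ARM was the first QOM'ified target); the stand-in types exist only so the fragment compiles on its own:

```c
/* Illustrative only -- not part of this patch. */
typedef struct Object { int dummy; } Object;              /* stand-in for the QOM base  */
typedef struct CPUARMState { int regs[16]; } CPUARMState; /* stand-in for the arch env  */

typedef struct CPUState {          /* the "QOM CPUState" of the commit message    */
    Object parent_obj;             /* QOM object header                           */
    int cpu_index;                 /* common fields are meant to migrate up here  */
    unsigned int interrupt_request;
} CPUState;

typedef struct ARMCPU {            /* per-target QOM subclass                     */
    CPUState parent_obj;           /* shared, target-independent state            */
    CPUARMState env;               /* the per-target CPUArchState lives on as env */
} ARMCPU;
```

Under that layout, code that today walks first_cpu->next_cpu over CPUArchState pointers would eventually iterate over the common CPUState objects instead.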
diff --git a/bsd-user/main.c b/bsd-user/main.c index 78533d5..48cb715 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -70,11 +70,11 @@ int cpu_get_pic_interrupt(CPUX86State *env) #endif /* These are no-ops because we are not threadsafe. */ -static inline void cpu_exec_start(CPUState *env) +static inline void cpu_exec_start(CPUArchState *env) { } -static inline void cpu_exec_end(CPUState *env) +static inline void cpu_exec_end(CPUArchState *env) { } @@ -713,7 +713,7 @@ static void usage(void) exit(1); } -THREAD CPUState *thread_env; +THREAD CPUArchState *thread_env; /* Assumes contents are already zeroed. */ void init_task_state(TaskState *ts) @@ -737,7 +737,7 @@ int main(int argc, char **argv) struct target_pt_regs regs1, *regs = ®s1; struct image_info info1, *info = &info1; TaskState ts1, *ts = &ts1; - CPUState *env; + CPUArchState *env; int optind; const char *r; int gdbstub_port = 0; diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index 1ba2d08..8a5ee3d 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -139,8 +139,8 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6); void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -extern THREAD CPUState *thread_env; -void cpu_loop(CPUState *env); +extern THREAD CPUArchState *thread_env; +void cpu_loop(CPUArchState *env); char *target_strerror(int err); int get_osversion(void); void fork_start(void); @@ -167,13 +167,13 @@ void print_openbsd_syscall_ret(int num, abi_long ret); extern int do_strace; /* signal.c */ -void process_pending_signals(CPUState *cpu_env); +void process_pending_signals(CPUArchState *cpu_env); void signal_init(void); -//int queue_signal(CPUState *env, int sig, target_siginfo_t *info); +//int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info); //void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); //void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); -long do_sigreturn(CPUState *env); -long do_rt_sigreturn(CPUState *env); +long do_sigreturn(CPUArchState *env); +long do_rt_sigreturn(CPUArchState *env); abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); /* mmap.c */ diff --git a/bsd-user/signal.c b/bsd-user/signal.c index 40313c8..445f69e 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -33,6 +33,6 @@ void signal_init(void) { } -void process_pending_signals(CPUState *cpu_env) +void process_pending_signals(CPUArchState *cpu_env) { } @@ -322,20 +322,20 @@ void page_set_flags(target_ulong start, target_ulong end, int flags); int page_check_range(target_ulong start, target_ulong len, int flags); #endif -CPUState *cpu_copy(CPUState *env); -CPUState *qemu_get_cpu(int cpu); +CPUArchState *cpu_copy(CPUArchState *env); +CPUArchState *qemu_get_cpu(int cpu); #define CPU_DUMP_CODE 0x00010000 -void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, +void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf, int flags); -void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf, +void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf, int flags); -void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...) +void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...) 
GCC_FMT_ATTR(2, 3); -extern CPUState *first_cpu; -DECLARE_TLS(CPUState *,cpu_single_env); +extern CPUArchState *first_cpu; +DECLARE_TLS(CPUArchState *,cpu_single_env); #define cpu_single_env tls_var(cpu_single_env) /* Flags for use in ENV->INTERRUPT_PENDING. @@ -389,23 +389,23 @@ DECLARE_TLS(CPUState *,cpu_single_env); | CPU_INTERRUPT_TGT_EXT_4) #ifndef CONFIG_USER_ONLY -typedef void (*CPUInterruptHandler)(CPUState *, int); +typedef void (*CPUInterruptHandler)(CPUArchState *, int); extern CPUInterruptHandler cpu_interrupt_handler; -static inline void cpu_interrupt(CPUState *s, int mask) +static inline void cpu_interrupt(CPUArchState *s, int mask) { cpu_interrupt_handler(s, mask); } #else /* USER_ONLY */ -void cpu_interrupt(CPUState *env, int mask); +void cpu_interrupt(CPUArchState *env, int mask); #endif /* USER_ONLY */ -void cpu_reset_interrupt(CPUState *env, int mask); +void cpu_reset_interrupt(CPUArchState *env, int mask); -void cpu_exit(CPUState *s); +void cpu_exit(CPUArchState *s); -bool qemu_cpu_has_work(CPUState *env); +bool qemu_cpu_has_work(CPUArchState *env); /* Breakpoint/watchpoint flags */ #define BP_MEM_READ 0x01 @@ -416,26 +416,26 @@ bool qemu_cpu_has_work(CPUState *env); #define BP_GDB 0x10 #define BP_CPU 0x20 -int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, +int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags, CPUBreakpoint **breakpoint); -int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags); -void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint); -void cpu_breakpoint_remove_all(CPUState *env, int mask); -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, +int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags); +void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint); +void cpu_breakpoint_remove_all(CPUArchState *env, int mask); +int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, int flags, CPUWatchpoint **watchpoint); -int cpu_watchpoint_remove(CPUState *env, target_ulong addr, +int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len, int flags); -void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint); -void cpu_watchpoint_remove_all(CPUState *env, int mask); +void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint); +void cpu_watchpoint_remove_all(CPUArchState *env, int mask); #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ #define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */ -void cpu_single_step(CPUState *env, int enabled); -void cpu_state_reset(CPUState *s); -int cpu_is_stopped(CPUState *env); -void run_on_cpu(CPUState *env, void (*func)(void *data), void *data); +void cpu_single_step(CPUArchState *env, int enabled); +void cpu_state_reset(CPUArchState *s); +int cpu_is_stopped(CPUArchState *env); +void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data); #define CPU_LOG_TB_OUT_ASM (1 << 0) #define CPU_LOG_TB_IN_ASM (1 << 1) @@ -466,7 +466,7 @@ int cpu_str_to_log_mask(const char *str); /* Return the physical page corresponding to a virtual one. Use it only for debugging because no protection checks are done. Return -1 if no page found. 
*/ -target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr); +target_phys_addr_t cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr); /* memory API */ @@ -508,12 +508,12 @@ extern int mem_prealloc; /* Set if TLB entry is an IO callback. */ #define TLB_MMIO (1 << 5) -void cpu_tlb_update_dirty(CPUState *env); +void cpu_tlb_update_dirty(CPUArchState *env); void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); #endif /* !CONFIG_USER_ONLY */ -int cpu_memory_rw_debug(CPUState *env, target_ulong addr, +int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write); #endif /* CPU_ALL_H */ @@ -202,7 +202,7 @@ typedef struct CPUWatchpoint { jmp_buf jmp_env; \ int exception_index; \ \ - CPUState *next_cpu; /* next CPU sharing TB cache */ \ + CPUArchState *next_cpu; /* next CPU sharing TB cache */ \ int cpu_index; /* CPU index (informative) */ \ uint32_t host_tid; /* host thread ID */ \ int numa_node; /* NUMA node this cpu is belonging to */ \ @@ -26,12 +26,12 @@ int tb_invalidated_flag; //#define CONFIG_DEBUG_EXEC -bool qemu_cpu_has_work(CPUState *env) +bool qemu_cpu_has_work(CPUArchState *env) { return cpu_has_work(env); } -void cpu_loop_exit(CPUState *env) +void cpu_loop_exit(CPUArchState *env) { env->current_tb = NULL; longjmp(env->jmp_env, 1); @@ -41,7 +41,7 @@ void cpu_loop_exit(CPUState *env) restored in a state compatible with the CPU emulator */ #if defined(CONFIG_SOFTMMU) -void cpu_resume_from_signal(CPUState *env, void *puc) +void cpu_resume_from_signal(CPUArchState *env, void *puc) { /* XXX: restore cpu registers saved in host registers */ @@ -52,7 +52,7 @@ void cpu_resume_from_signal(CPUState *env, void *puc) /* Execute the code without caching the generated code. An interpreter could be used if available. */ -static void cpu_exec_nocache(CPUState *env, int max_cycles, +static void cpu_exec_nocache(CPUArchState *env, int max_cycles, TranslationBlock *orig_tb) { unsigned long next_tb; @@ -79,7 +79,7 @@ static void cpu_exec_nocache(CPUState *env, int max_cycles, tb_free(tb); } -static TranslationBlock *tb_find_slow(CPUState *env, +static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, target_ulong cs_base, uint64_t flags) @@ -135,7 +135,7 @@ static TranslationBlock *tb_find_slow(CPUState *env, return tb; } -static inline TranslationBlock *tb_find_fast(CPUState *env) +static inline TranslationBlock *tb_find_fast(CPUArchState *env) { TranslationBlock *tb; target_ulong cs_base, pc; @@ -163,7 +163,7 @@ CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler) return old_handler; } -static void cpu_handle_debug_exception(CPUState *env) +static void cpu_handle_debug_exception(CPUArchState *env) { CPUWatchpoint *wp; @@ -181,7 +181,7 @@ static void cpu_handle_debug_exception(CPUState *env) volatile sig_atomic_t exit_request; -int cpu_exec(CPUState *env) +int cpu_exec(CPUArchState *env) { int ret, interrupt_request; TranslationBlock *tb; @@ -58,7 +58,7 @@ #endif /* CONFIG_LINUX */ -static CPUState *next_cpu; +static CPUArchState *next_cpu; /***********************************************************/ /* guest cycle counter */ @@ -89,7 +89,7 @@ TimersState timers_state; int64_t cpu_get_icount(void) { int64_t icount; - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; icount = qemu_icount; if (env) { @@ -339,7 +339,7 @@ void configure_icount(const char *option) void hw_error(const char *fmt, ...) 
{ va_list ap; - CPUState *env; + CPUArchState *env; va_start(ap, fmt); fprintf(stderr, "qemu: hardware error: "); @@ -359,7 +359,7 @@ void hw_error(const char *fmt, ...) void cpu_synchronize_all_states(void) { - CPUState *cpu; + CPUArchState *cpu; for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { cpu_synchronize_state(cpu); @@ -368,7 +368,7 @@ void cpu_synchronize_all_states(void) void cpu_synchronize_all_post_reset(void) { - CPUState *cpu; + CPUArchState *cpu; for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { cpu_synchronize_post_reset(cpu); @@ -377,14 +377,14 @@ void cpu_synchronize_all_post_reset(void) void cpu_synchronize_all_post_init(void) { - CPUState *cpu; + CPUArchState *cpu; for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) { cpu_synchronize_post_init(cpu); } } -int cpu_is_stopped(CPUState *env) +int cpu_is_stopped(CPUArchState *env) { return !runstate_is_running() || env->stopped; } @@ -402,7 +402,7 @@ static void do_vm_stop(RunState state) } } -static int cpu_can_run(CPUState *env) +static int cpu_can_run(CPUArchState *env) { if (env->stop) { return 0; @@ -413,7 +413,7 @@ static int cpu_can_run(CPUState *env) return 1; } -static bool cpu_thread_is_idle(CPUState *env) +static bool cpu_thread_is_idle(CPUArchState *env) { if (env->stop || env->queued_work_first) { return false; @@ -430,7 +430,7 @@ static bool cpu_thread_is_idle(CPUState *env) bool all_cpu_threads_idle(void) { - CPUState *env; + CPUArchState *env; for (env = first_cpu; env != NULL; env = env->next_cpu) { if (!cpu_thread_is_idle(env)) { @@ -440,7 +440,7 @@ bool all_cpu_threads_idle(void) return true; } -static void cpu_handle_guest_debug(CPUState *env) +static void cpu_handle_guest_debug(CPUArchState *env) { gdb_set_stop_cpu(env); qemu_system_debug_request(); @@ -494,7 +494,7 @@ static void qemu_init_sigbus(void) prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0); } -static void qemu_kvm_eat_signals(CPUState *env) +static void qemu_kvm_eat_signals(CPUArchState *env) { struct timespec ts = { 0, 0 }; siginfo_t siginfo; @@ -537,7 +537,7 @@ static void qemu_init_sigbus(void) { } -static void qemu_kvm_eat_signals(CPUState *env) +static void qemu_kvm_eat_signals(CPUArchState *env) { } #endif /* !CONFIG_LINUX */ @@ -547,7 +547,7 @@ static void dummy_signal(int sig) { } -static void qemu_kvm_init_cpu_signals(CPUState *env) +static void qemu_kvm_init_cpu_signals(CPUArchState *env) { int r; sigset_t set; @@ -582,7 +582,7 @@ static void qemu_tcg_init_cpu_signals(void) } #else /* _WIN32 */ -static void qemu_kvm_init_cpu_signals(CPUState *env) +static void qemu_kvm_init_cpu_signals(CPUArchState *env) { abort(); } @@ -619,7 +619,7 @@ void qemu_init_cpu_loop(void) qemu_thread_get_self(&io_thread); } -void run_on_cpu(CPUState *env, void (*func)(void *data), void *data) +void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data) { struct qemu_work_item wi; @@ -641,14 +641,14 @@ void run_on_cpu(CPUState *env, void (*func)(void *data), void *data) qemu_cpu_kick(env); while (!wi.done) { - CPUState *self_env = cpu_single_env; + CPUArchState *self_env = cpu_single_env; qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); cpu_single_env = self_env; } } -static void flush_queued_work(CPUState *env) +static void flush_queued_work(CPUArchState *env) { struct qemu_work_item *wi; @@ -665,7 +665,7 @@ static void flush_queued_work(CPUState *env) qemu_cond_broadcast(&qemu_work_cond); } -static void qemu_wait_io_event_common(CPUState *env) +static void qemu_wait_io_event_common(CPUArchState *env) { if (env->stop) { env->stop 
= 0; @@ -678,7 +678,7 @@ static void qemu_wait_io_event_common(CPUState *env) static void qemu_tcg_wait_io_event(void) { - CPUState *env; + CPUArchState *env; while (all_cpu_threads_idle()) { /* Start accounting real time to the virtual clock if the CPUs @@ -696,7 +696,7 @@ static void qemu_tcg_wait_io_event(void) } } -static void qemu_kvm_wait_io_event(CPUState *env) +static void qemu_kvm_wait_io_event(CPUArchState *env) { while (cpu_thread_is_idle(env)) { qemu_cond_wait(env->halt_cond, &qemu_global_mutex); @@ -708,7 +708,7 @@ static void qemu_kvm_wait_io_event(CPUState *env) static void *qemu_kvm_cpu_thread_fn(void *arg) { - CPUState *env = arg; + CPUArchState *env = arg; int r; qemu_mutex_lock(&qemu_global_mutex); @@ -745,7 +745,7 @@ static void tcg_exec_all(void); static void *qemu_tcg_cpu_thread_fn(void *arg) { - CPUState *env = arg; + CPUArchState *env = arg; qemu_tcg_init_cpu_signals(); qemu_thread_get_self(env->thread); @@ -779,7 +779,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) return NULL; } -static void qemu_cpu_kick_thread(CPUState *env) +static void qemu_cpu_kick_thread(CPUArchState *env) { #ifndef _WIN32 int err; @@ -800,7 +800,7 @@ static void qemu_cpu_kick_thread(CPUState *env) void qemu_cpu_kick(void *_env) { - CPUState *env = _env; + CPUArchState *env = _env; qemu_cond_broadcast(env->halt_cond); if (kvm_enabled() && !env->thread_kicked) { @@ -825,7 +825,7 @@ void qemu_cpu_kick_self(void) int qemu_cpu_is_self(void *_env) { - CPUState *env = _env; + CPUArchState *env = _env; return qemu_thread_is_self(env->thread); } @@ -852,7 +852,7 @@ void qemu_mutex_unlock_iothread(void) static int all_vcpus_paused(void) { - CPUState *penv = first_cpu; + CPUArchState *penv = first_cpu; while (penv) { if (!penv->stopped) { @@ -866,7 +866,7 @@ static int all_vcpus_paused(void) void pause_all_vcpus(void) { - CPUState *penv = first_cpu; + CPUArchState *penv = first_cpu; qemu_clock_enable(vm_clock, false); while (penv) { @@ -899,7 +899,7 @@ void pause_all_vcpus(void) void resume_all_vcpus(void) { - CPUState *penv = first_cpu; + CPUArchState *penv = first_cpu; qemu_clock_enable(vm_clock, true); while (penv) { @@ -912,7 +912,7 @@ void resume_all_vcpus(void) static void qemu_tcg_init_vcpu(void *_env) { - CPUState *env = _env; + CPUArchState *env = _env; /* share a single thread for all cpus with TCG */ if (!tcg_cpu_thread) { @@ -935,7 +935,7 @@ static void qemu_tcg_init_vcpu(void *_env) } } -static void qemu_kvm_start_vcpu(CPUState *env) +static void qemu_kvm_start_vcpu(CPUArchState *env) { env->thread = g_malloc0(sizeof(QemuThread)); env->halt_cond = g_malloc0(sizeof(QemuCond)); @@ -949,7 +949,7 @@ static void qemu_kvm_start_vcpu(CPUState *env) void qemu_init_vcpu(void *_env) { - CPUState *env = _env; + CPUArchState *env = _env; env->nr_cores = smp_cores; env->nr_threads = smp_threads; @@ -996,7 +996,7 @@ void vm_stop_force_state(RunState state) } } -static int tcg_cpu_exec(CPUState *env) +static int tcg_cpu_exec(CPUArchState *env) { int ret; #ifdef CONFIG_PROFILER @@ -1045,7 +1045,7 @@ static void tcg_exec_all(void) next_cpu = first_cpu; } for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) { - CPUState *env = next_cpu; + CPUArchState *env = next_cpu; qemu_clock_enable(vm_clock, (env->singlestep_enabled & SSTEP_NOTIMER) == 0); @@ -1065,7 +1065,7 @@ static void tcg_exec_all(void) void set_numa_modes(void) { - CPUState *env; + CPUArchState *env; int i; for (env = first_cpu; env != NULL; env = env->next_cpu) { @@ -1111,7 +1111,7 @@ void list_cpus(FILE *f, 
fprintf_function cpu_fprintf, const char *optarg) CpuInfoList *qmp_query_cpus(Error **errp) { CpuInfoList *head = NULL, *cur_item = NULL; - CPUState *env; + CPUArchState *env; for(env = first_cpu; env != NULL; env = env->next_cpu) { CpuInfoList *info; @@ -1157,7 +1157,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename, { FILE *f; uint32_t l; - CPUState *env; + CPUArchState *env; uint8_t buf[1024]; if (!has_cpu) { @@ -1232,7 +1232,7 @@ exit: void qmp_inject_nmi(Error **errp) { #if defined(TARGET_I386) - CPUState *env; + CPUArchState *env; for (env = first_cpu; env != NULL; env = env->next_cpu) { if (!env->apic_state) { diff --git a/darwin-user/main.c b/darwin-user/main.c index f5cadc7..544e219 100644 --- a/darwin-user/main.c +++ b/darwin-user/main.c @@ -71,7 +71,7 @@ void gemu_log(const char *fmt, ...) va_end(ap); } -int cpu_get_pic_interrupt(CPUState *env) +int cpu_get_pic_interrupt(CPUArchState *env) { return -1; } @@ -729,7 +729,7 @@ static void usage(void) } /* XXX: currently only used for async signals (see signal.c) */ -CPUState *global_env; +CPUArchState *global_env; /* used to free thread contexts */ TaskState *first_task_state; @@ -741,7 +741,7 @@ int main(int argc, char **argv) const char *log_mask = NULL; struct target_pt_regs regs1, *regs = ®s1; TaskState ts1, *ts = &ts1; - CPUState *env; + CPUArchState *env; int optind; short use_gdbstub = 0; const char *r; diff --git a/darwin-user/qemu.h b/darwin-user/qemu.h index b6d3e6c..9e16c8e 100644 --- a/darwin-user/qemu.h +++ b/darwin-user/qemu.h @@ -104,8 +104,8 @@ void qerror(const char *fmt, ...) GCC_FMT_ATTR(1, 2); void write_dt(void *ptr, unsigned long addr, unsigned long limit, int flags); -extern CPUState *global_env; -void cpu_loop(CPUState *env); +extern CPUArchState *global_env; +void cpu_loop(CPUArchState *env); void init_paths(const char *prefix); const char *path(const char *pathname); @@ -122,7 +122,7 @@ void signal_init(void); int queue_signal(int sig, target_siginfo_t *info); void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); -long do_sigreturn(CPUState *env, int num); +long do_sigreturn(CPUArchState *env, int num); /* machload.c */ int mach_exec(const char * filename, char ** argv, char ** envp, diff --git a/darwin-user/signal.c b/darwin-user/signal.c index 8f9705d..489cb64 100644 --- a/darwin-user/signal.c +++ b/darwin-user/signal.c @@ -377,12 +377,12 @@ long do_sigreturn(CPUX86State *env, int num) #else static void setup_frame(int sig, struct emulated_sigaction *ka, - void *set, CPUState *env) + void *set, CPUArchState *env) { fprintf(stderr, "setup_frame: not implemented\n"); } -long do_sigreturn(CPUState *env, int num) +long do_sigreturn(CPUArchState *env, int num) { int i = 0; struct target_sigcontext *scp = get_int_arg(&i, env); diff --git a/def-helper.h b/def-helper.h index 8a822c7..5d057d6 100644 --- a/def-helper.h +++ b/def-helper.h @@ -52,7 +52,7 @@ #define dh_ctype_tl target_ulong #define dh_ctype_ptr void * #define dh_ctype_void void -#define dh_ctype_env CPUState * +#define dh_ctype_env CPUArchState * #define dh_ctype(t) dh_ctype_##t /* We can't use glue() here because it falls foul of C preprocessor @@ -339,7 +339,7 @@ const char *lookup_symbol(target_ulong orig_addr) #include "monitor.h" static int monitor_disas_is_physical; -static CPUState *monitor_disas_env; +static CPUArchState *monitor_disas_env; static int monitor_read_memory (bfd_vma memaddr, bfd_byte *myaddr, int length, @@ 
-363,7 +363,7 @@ monitor_fprintf(FILE *stream, const char *fmt, ...) return 0; } -void monitor_disas(Monitor *mon, CPUState *env, +void monitor_disas(Monitor *mon, CPUArchState *env, target_ulong pc, int nb_insn, int is_physical, int flags) { int count, i; @@ -8,7 +8,7 @@ void disas(FILE *out, void *code, unsigned long size); void target_disas(FILE *out, target_ulong code, target_ulong size, int flags); -void monitor_disas(Monitor *mon, CPUState *env, +void monitor_disas(Monitor *mon, CPUArchState *env, target_ulong pc, int nb_insn, int is_physical, int flags); /* Look up symbol for debugging purpose. Returns "" if unknown. */ diff --git a/dyngen-exec.h b/dyngen-exec.h index 09be9ea..083e20b 100644 --- a/dyngen-exec.h +++ b/dyngen-exec.h @@ -61,10 +61,10 @@ #endif #if defined(AREG0) -register CPUState *env asm(AREG0); +register CPUArchState *env asm(AREG0); #else /* TODO: Try env = cpu_single_env. */ -extern CPUState *env; +extern CPUArchState *env; #endif #endif /* !defined(__DYNGEN_EXEC_H__) */ @@ -76,30 +76,30 @@ extern uint16_t gen_opc_icount[OPC_BUF_SIZE]; #include "qemu-log.h" -void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb); -void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb); -void restore_state_to_opc(CPUState *env, struct TranslationBlock *tb, +void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb); +void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb); +void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb, int pc_pos); void cpu_gen_init(void); -int cpu_gen_code(CPUState *env, struct TranslationBlock *tb, +int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb, int *gen_code_size_ptr); int cpu_restore_state(struct TranslationBlock *tb, - CPUState *env, unsigned long searched_pc); -void cpu_resume_from_signal(CPUState *env1, void *puc); -void cpu_io_recompile(CPUState *env, void *retaddr); -TranslationBlock *tb_gen_code(CPUState *env, + CPUArchState *env, unsigned long searched_pc); +void cpu_resume_from_signal(CPUArchState *env1, void *puc); +void cpu_io_recompile(CPUArchState *env, void *retaddr); +TranslationBlock *tb_gen_code(CPUArchState *env, target_ulong pc, target_ulong cs_base, int flags, int cflags); -void cpu_exec_init(CPUState *env); -void QEMU_NORETURN cpu_loop_exit(CPUState *env1); +void cpu_exec_init(CPUArchState *env); +void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1); int page_unprotect(target_ulong address, unsigned long pc, void *puc); void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access); -void tlb_flush_page(CPUState *env, target_ulong addr); -void tlb_flush(CPUState *env, int flush_global); +void tlb_flush_page(CPUArchState *env, target_ulong addr); +void tlb_flush(CPUArchState *env, int flush_global); #if !defined(CONFIG_USER_ONLY) -void tlb_set_page(CPUState *env, target_ulong vaddr, +void tlb_set_page(CPUArchState *env, target_ulong vaddr, target_phys_addr_t paddr, int prot, int mmu_idx, target_ulong size); #endif @@ -182,7 +182,7 @@ static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc) } void tb_free(TranslationBlock *tb); -void tb_flush(CPUState *env); +void tb_flush(CPUArchState *env); void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); @@ -305,7 +305,7 @@ uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr, void io_mem_write(struct MemoryRegion 
*mr, target_phys_addr_t addr, uint64_t value, unsigned size); -void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx, +void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx, void *retaddr); #include "softmmu_defs.h" @@ -333,15 +333,15 @@ void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx, #endif #if defined(CONFIG_USER_ONLY) -static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr) +static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) { return addr; } #else -tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr); +tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); #endif -typedef void (CPUDebugExcpHandler)(CPUState *env); +typedef void (CPUDebugExcpHandler)(CPUArchState *env); CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler); @@ -353,7 +353,7 @@ extern volatile sig_atomic_t exit_request; /* Deterministic execution requires that IO only be performed on the last instruction of a TB so that interrupts take effect immediately. */ -static inline int can_do_io(CPUState *env) +static inline int can_do_io(CPUArchState *env) { if (!use_icount) { return 1; @@ -123,10 +123,10 @@ static MemoryRegion io_mem_subpage_ram; #endif -CPUState *first_cpu; +CPUArchState *first_cpu; /* current CPU in the current thread. It is only valid inside cpu_exec() */ -DEFINE_TLS(CPUState *,cpu_single_env); +DEFINE_TLS(CPUArchState *,cpu_single_env); /* 0 = Do not count executed instructions. 1 = Precise instruction counting. 2 = Adaptive rate instruction counting. */ @@ -509,7 +509,7 @@ static target_phys_addr_t section_addr(MemoryRegionSection *section, } static void tlb_protect_code(ram_addr_t ram_addr); -static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, +static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr, target_ulong vaddr); #define mmap_lock() do { } while(0) #define mmap_unlock() do { } while(0) @@ -661,7 +661,7 @@ void cpu_exec_init_all(void) static int cpu_common_post_load(void *opaque, int version_id) { - CPUState *env = opaque; + CPUArchState *env = opaque; /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the version_id is increased. 
*/ @@ -678,16 +678,16 @@ static const VMStateDescription vmstate_cpu_common = { .minimum_version_id_old = 1, .post_load = cpu_common_post_load, .fields = (VMStateField []) { - VMSTATE_UINT32(halted, CPUState), - VMSTATE_UINT32(interrupt_request, CPUState), + VMSTATE_UINT32(halted, CPUArchState), + VMSTATE_UINT32(interrupt_request, CPUArchState), VMSTATE_END_OF_LIST() } }; #endif -CPUState *qemu_get_cpu(int cpu) +CPUArchState *qemu_get_cpu(int cpu) { - CPUState *env = first_cpu; + CPUArchState *env = first_cpu; while (env) { if (env->cpu_index == cpu) @@ -698,9 +698,9 @@ CPUState *qemu_get_cpu(int cpu) return env; } -void cpu_exec_init(CPUState *env) +void cpu_exec_init(CPUArchState *env) { - CPUState **penv; + CPUArchState **penv; int cpu_index; #if defined(CONFIG_USER_ONLY) @@ -799,9 +799,9 @@ static void page_flush_tb(void) /* flush all the translation blocks */ /* XXX: tb_flush is currently not thread safe */ -void tb_flush(CPUState *env1) +void tb_flush(CPUArchState *env1) { - CPUState *env; + CPUArchState *env; #if defined(DEBUG_FLUSH) printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", (unsigned long)(code_gen_ptr - code_gen_buffer), @@ -934,7 +934,7 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n) void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) { - CPUState *env; + CPUArchState *env; PageDesc *p; unsigned int h, n1; tb_page_addr_t phys_pc; @@ -1043,7 +1043,7 @@ static void build_page_bitmap(PageDesc *p) } } -TranslationBlock *tb_gen_code(CPUState *env, +TranslationBlock *tb_gen_code(CPUArchState *env, target_ulong pc, target_ulong cs_base, int flags, int cflags) { @@ -1090,7 +1090,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access) { TranslationBlock *tb, *tb_next, *saved_tb; - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; tb_page_addr_t tb_start, tb_end; PageDesc *p; int n; @@ -1227,7 +1227,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr, int n; #ifdef TARGET_HAS_PRECISE_SMC TranslationBlock *current_tb = NULL; - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; int current_tb_modified = 0; target_ulong current_pc = 0; target_ulong current_cs_base = 0; @@ -1457,12 +1457,12 @@ static void tb_reset_jump_recursive(TranslationBlock *tb) #if defined(TARGET_HAS_ICE) #if defined(CONFIG_USER_ONLY) -static void breakpoint_invalidate(CPUState *env, target_ulong pc) +static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) { tb_invalidate_phys_page_range(pc, pc + 1, 0); } #else -static void breakpoint_invalidate(CPUState *env, target_ulong pc) +static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) { target_phys_addr_t addr; ram_addr_t ram_addr; @@ -1482,19 +1482,19 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc) #endif /* TARGET_HAS_ICE */ #if defined(CONFIG_USER_ONLY) -void cpu_watchpoint_remove_all(CPUState *env, int mask) +void cpu_watchpoint_remove_all(CPUArchState *env, int mask) { } -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, int flags, CPUWatchpoint **watchpoint) { return -ENOSYS; } #else /* Add a watchpoint. 
*/ -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, int flags, CPUWatchpoint **watchpoint) { target_ulong len_mask = ~(len - 1); @@ -1527,7 +1527,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, } /* Remove a specific watchpoint. */ -int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len, int flags) { target_ulong len_mask = ~(len - 1); @@ -1544,7 +1544,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, } /* Remove a specific watchpoint by reference. */ -void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) +void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint) { QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); @@ -1554,7 +1554,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) } /* Remove all matching watchpoints. */ -void cpu_watchpoint_remove_all(CPUState *env, int mask) +void cpu_watchpoint_remove_all(CPUArchState *env, int mask) { CPUWatchpoint *wp, *next; @@ -1566,7 +1566,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask) #endif /* Add a breakpoint. */ -int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, +int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags, CPUBreakpoint **breakpoint) { #if defined(TARGET_HAS_ICE) @@ -1594,7 +1594,7 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, } /* Remove a specific breakpoint. */ -int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) +int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; @@ -1612,7 +1612,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) } /* Remove a specific breakpoint by reference. */ -void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) +void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint) { #if defined(TARGET_HAS_ICE) QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); @@ -1624,7 +1624,7 @@ void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) } /* Remove all matching breakpoints. */ -void cpu_breakpoint_remove_all(CPUState *env, int mask) +void cpu_breakpoint_remove_all(CPUArchState *env, int mask) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp, *next; @@ -1638,7 +1638,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask) /* enable or disable single step mode. EXCP_DEBUG is returned by the CPU loop after each instruction */ -void cpu_single_step(CPUState *env, int enabled) +void cpu_single_step(CPUArchState *env, int enabled) { #if defined(TARGET_HAS_ICE) if (env->singlestep_enabled != enabled) { @@ -1694,7 +1694,7 @@ void cpu_set_log_filename(const char *filename) cpu_set_log(loglevel); } -static void cpu_unlink_tb(CPUState *env) +static void cpu_unlink_tb(CPUArchState *env) { /* FIXME: TB unchaining isn't SMP safe. For now just ignore the problem and hope the cpu will stop of its own accord. 
For userspace @@ -1716,7 +1716,7 @@ static void cpu_unlink_tb(CPUState *env) #ifndef CONFIG_USER_ONLY /* mask must never be zero, except for A20 change call */ -static void tcg_handle_interrupt(CPUState *env, int mask) +static void tcg_handle_interrupt(CPUArchState *env, int mask) { int old_mask; @@ -1747,19 +1747,19 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; #else /* CONFIG_USER_ONLY */ -void cpu_interrupt(CPUState *env, int mask) +void cpu_interrupt(CPUArchState *env, int mask) { env->interrupt_request |= mask; cpu_unlink_tb(env); } #endif /* CONFIG_USER_ONLY */ -void cpu_reset_interrupt(CPUState *env, int mask) +void cpu_reset_interrupt(CPUArchState *env, int mask) { env->interrupt_request &= ~mask; } -void cpu_exit(CPUState *env) +void cpu_exit(CPUArchState *env) { env->exit_request = 1; cpu_unlink_tb(env); @@ -1837,7 +1837,7 @@ int cpu_str_to_log_mask(const char *str) return mask; } -void cpu_abort(CPUState *env, const char *fmt, ...) +void cpu_abort(CPUArchState *env, const char *fmt, ...) { va_list ap; va_list ap2; @@ -1877,17 +1877,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...) abort(); } -CPUState *cpu_copy(CPUState *env) +CPUArchState *cpu_copy(CPUArchState *env) { - CPUState *new_env = cpu_init(env->cpu_model_str); - CPUState *next_cpu = new_env->next_cpu; + CPUArchState *new_env = cpu_init(env->cpu_model_str); + CPUArchState *next_cpu = new_env->next_cpu; int cpu_index = new_env->cpu_index; #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; CPUWatchpoint *wp; #endif - memcpy(new_env, env, sizeof(CPUState)); + memcpy(new_env, env, sizeof(CPUArchState)); /* Preserve chaining and index. */ new_env->next_cpu = next_cpu; @@ -1913,7 +1913,7 @@ CPUState *cpu_copy(CPUState *env) #if !defined(CONFIG_USER_ONLY) -static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) +static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr) { unsigned int i; @@ -1947,7 +1947,7 @@ static CPUTLBEntry s_cputlb_empty_entry = { * entries from the TLB at any time, so flushing more entries than * required is only an efficiency issue, not a correctness issue. 
*/ -void tlb_flush(CPUState *env, int flush_global) +void tlb_flush(CPUArchState *env, int flush_global) { int i; @@ -1984,7 +1984,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) } } -void tlb_flush_page(CPUState *env, target_ulong addr) +void tlb_flush_page(CPUArchState *env, target_ulong addr) { int i; int mmu_idx; @@ -2025,7 +2025,7 @@ static void tlb_protect_code(ram_addr_t ram_addr) /* update the TLB so that writes in physical page 'phys_addr' are no longer tested for self modifying code */ -static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, +static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr, target_ulong vaddr) { cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); @@ -2047,7 +2047,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags) { - CPUState *env; + CPUArchState *env; unsigned long length, start1; int i; @@ -2102,7 +2102,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) } /* update the TLB according to the current state of the dirty bits */ -void cpu_tlb_update_dirty(CPUState *env) +void cpu_tlb_update_dirty(CPUArchState *env) { int i; int mmu_idx; @@ -2120,7 +2120,7 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) /* update the TLB corresponding to virtual page vaddr so that it is no longer dirty */ -static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) +static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr) { int i; int mmu_idx; @@ -2133,7 +2133,7 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) /* Our TLB does not support large pages, so remember the area covered by large pages and trigger a full TLB flush if these are invalidated. */ -static void tlb_add_large_page(CPUState *env, target_ulong vaddr, +static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, target_ulong size) { target_ulong mask = ~(size - 1); @@ -2174,7 +2174,7 @@ static bool is_ram_rom_romd(MemoryRegionSection *s) /* Add a new TLB entry. At most one entry for a given virtual address is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the supplied size is only used by tlb_flush_page. */ -void tlb_set_page(CPUState *env, target_ulong vaddr, +void tlb_set_page(CPUArchState *env, target_ulong vaddr, target_phys_addr_t paddr, int prot, int mmu_idx, target_ulong size) { @@ -2277,11 +2277,11 @@ void tlb_set_page(CPUState *env, target_ulong vaddr, #else -void tlb_flush(CPUState *env, int flush_global) +void tlb_flush(CPUArchState *env, int flush_global) { } -void tlb_flush_page(CPUState *env, target_ulong addr) +void tlb_flush_page(CPUArchState *env, target_ulong addr) { } @@ -2542,7 +2542,7 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc) return 0; } -static inline void tlb_set_dirty(CPUState *env, +static inline void tlb_set_dirty(CPUArchState *env, unsigned long addr, target_ulong vaddr) { } @@ -3299,7 +3299,7 @@ static const MemoryRegionOps notdirty_mem_ops = { /* Generate a debug exception if a watchpoint has been hit. 
*/ static void check_watchpoint(int offset, int len_mask, int flags) { - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; target_ulong pc, cs_base; TranslationBlock *tb; target_ulong vaddr; @@ -3544,7 +3544,7 @@ static void core_begin(MemoryListener *listener) static void core_commit(MemoryListener *listener) { - CPUState *env; + CPUArchState *env; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ @@ -3734,7 +3734,7 @@ MemoryRegion *get_system_io(void) /* physical memory access (slow version, mainly for debug) */ #if defined(CONFIG_USER_ONLY) -int cpu_memory_rw_debug(CPUState *env, target_ulong addr, +int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { int l, flags; @@ -4440,7 +4440,7 @@ void stq_be_phys(target_phys_addr_t addr, uint64_t val) } /* virtual memory access for debug (includes writing to ROM) */ -int cpu_memory_rw_debug(CPUState *env, target_ulong addr, +int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { int l; @@ -4471,7 +4471,7 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr, /* in deterministic execution mode, instructions doing device I/Os must be at the end of the TB */ -void cpu_io_recompile(CPUState *env, void *retaddr) +void cpu_io_recompile(CPUArchState *env, void *retaddr) { TranslationBlock *tb; uint32_t n, cflags; @@ -4585,7 +4585,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) /* NOTE: this function can trigger an exception */ /* NOTE2: the returned address is not exactly the physical address: it is the offset relative to phys_ram_base */ -tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr) +tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) { int mmu_idx, page_index, pd; void *p; @@ -42,7 +42,7 @@ #include "kvm.h" #ifndef TARGET_CPU_MEMORY_RW_DEBUG -static inline int target_memory_rw_debug(CPUState *env, target_ulong addr, +static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { return cpu_memory_rw_debug(env, addr, buf, len, is_write); @@ -287,9 +287,9 @@ enum RSState { RS_SYSCALL, }; typedef struct GDBState { - CPUState *c_cpu; /* current CPU for step/continue ops */ - CPUState *g_cpu; /* current CPU for other ops */ - CPUState *query_cpu; /* for q{f|s}ThreadInfo */ + CPUArchState *c_cpu; /* current CPU for step/continue ops */ + CPUArchState *g_cpu; /* current CPU for other ops */ + CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */ enum RSState state; /* parsing state */ char line_buf[MAX_PACKET_LENGTH]; int line_buf_index; @@ -1655,12 +1655,12 @@ static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n) #define NUM_CORE_REGS 0 -static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n) +static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n) { return 0; } -static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n) +static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n) { return 0; } @@ -1736,7 +1736,7 @@ static const char *get_feature_xml(const char *p, const char **newp) } #endif -static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg) +static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg) { GDBRegisterState *r; @@ -1751,7 +1751,7 @@ static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg) return 0; } -static int 
gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg) +static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg) { GDBRegisterState *r; @@ -1773,7 +1773,7 @@ static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg) gdb reading a CPU register, and set_reg is gdb modifying a CPU register. */ -void gdb_register_coprocessor(CPUState * env, +void gdb_register_coprocessor(CPUArchState * env, gdb_reg_cb get_reg, gdb_reg_cb set_reg, int num_regs, const char *xml, int g_pos) { @@ -1820,7 +1820,7 @@ static const int xlat_gdb_type[] = { static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) { - CPUState *env; + CPUArchState *env; int err = 0; if (kvm_enabled()) @@ -1854,7 +1854,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type) static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) { - CPUState *env; + CPUArchState *env; int err = 0; if (kvm_enabled()) @@ -1887,7 +1887,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type) static void gdb_breakpoint_remove_all(void) { - CPUState *env; + CPUArchState *env; if (kvm_enabled()) { kvm_remove_all_breakpoints(gdbserver_state->c_cpu); @@ -1939,7 +1939,7 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc) #endif } -static inline int gdb_id(CPUState *env) +static inline int gdb_id(CPUArchState *env) { #if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL) return env->host_tid; @@ -1948,9 +1948,9 @@ static inline int gdb_id(CPUState *env) #endif } -static CPUState *find_cpu(uint32_t thread_id) +static CPUArchState *find_cpu(uint32_t thread_id) { - CPUState *env; + CPUArchState *env; for (env = first_cpu; env != NULL; env = env->next_cpu) { if (gdb_id(env) == thread_id) { @@ -1963,7 +1963,7 @@ static CPUState *find_cpu(uint32_t thread_id) static int gdb_handle_packet(GDBState *s, const char *line_buf) { - CPUState *env; + CPUArchState *env; const char *p; uint32_t thread; int ch, reg_size, type, res; @@ -2383,7 +2383,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) return RS_IDLE; } -void gdb_set_stop_cpu(CPUState *env) +void gdb_set_stop_cpu(CPUArchState *env) { gdbserver_state->c_cpu = env; gdbserver_state->g_cpu = env; @@ -2393,7 +2393,7 @@ void gdb_set_stop_cpu(CPUState *env) static void gdb_vm_state_change(void *opaque, int running, RunState state) { GDBState *s = gdbserver_state; - CPUState *env = s->c_cpu; + CPUArchState *env = s->c_cpu; char buf[256]; const char *type; int ret; @@ -2602,7 +2602,7 @@ static void gdb_read_byte(GDBState *s, int ch) } /* Tell the remote gdb that the process has exited. */ -void gdb_exit(CPUState *env, int code) +void gdb_exit(CPUArchState *env, int code) { GDBState *s; char buf[4]; @@ -2642,7 +2642,7 @@ gdb_queuesig (void) } int -gdb_handlesig (CPUState *env, int sig) +gdb_handlesig (CPUArchState *env, int sig) { GDBState *s; char buf[256]; @@ -2691,7 +2691,7 @@ gdb_handlesig (CPUState *env, int sig) } /* Tell the remote gdb that the process has exited due to SIG. */ -void gdb_signalled(CPUState *env, int sig) +void gdb_signalled(CPUArchState *env, int sig) { GDBState *s; char buf[4]; @@ -2787,7 +2787,7 @@ int gdbserver_start(int port) } /* Disable gdb stub for child processes. 
*/ -void gdbserver_fork(CPUState *env) +void gdbserver_fork(CPUArchState *env) { GDBState *s = gdbserver_state; if (gdbserver_fd < 0 || s->fd < 0) @@ -11,22 +11,22 @@ #define GDB_WATCHPOINT_ACCESS 4 #ifdef NEED_CPU_H -typedef void (*gdb_syscall_complete_cb)(CPUState *env, +typedef void (*gdb_syscall_complete_cb)(CPUArchState *env, target_ulong ret, target_ulong err); void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...); int use_gdb_syscalls(void); -void gdb_set_stop_cpu(CPUState *env); -void gdb_exit(CPUState *, int); +void gdb_set_stop_cpu(CPUArchState *env); +void gdb_exit(CPUArchState *, int); #ifdef CONFIG_USER_ONLY int gdb_queuesig (void); -int gdb_handlesig (CPUState *, int); -void gdb_signalled(CPUState *, int); -void gdbserver_fork(CPUState *); +int gdb_handlesig (CPUArchState *, int); +void gdb_signalled(CPUArchState *, int); +void gdbserver_fork(CPUArchState *); #endif /* Get or set a register. Returns the size of the register. */ -typedef int (*gdb_reg_cb)(CPUState *env, uint8_t *buf, int reg); -void gdb_register_coprocessor(CPUState *env, +typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg); +void gdb_register_coprocessor(CPUArchState *env, gdb_reg_cb get_reg, gdb_reg_cb set_reg, int num_regs, const char *xml, int g_pos); diff --git a/gen-icount.h b/gen-icount.h index 5fb3829..430cb44 100644 --- a/gen-icount.h +++ b/gen-icount.h @@ -14,13 +14,13 @@ static inline void gen_icount_start(void) icount_label = gen_new_label(); count = tcg_temp_local_new_i32(); - tcg_gen_ld_i32(count, cpu_env, offsetof(CPUState, icount_decr.u32)); + tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32)); /* This is a horrid hack to allow fixing up the value later. */ icount_arg = gen_opparam_ptr + 1; tcg_gen_subi_i32(count, count, 0xdeadbeef); tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label); - tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low)); + tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low)); tcg_temp_free_i32(count); } @@ -36,13 +36,13 @@ static void gen_icount_end(TranslationBlock *tb, int num_insns) static inline void gen_io_start(void) { TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io)); tcg_temp_free_i32(tmp); } static inline void gen_io_end(void) { TCGv_i32 tmp = tcg_const_i32(0); - tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io)); tcg_temp_free_i32(tmp); } @@ -190,7 +190,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot) static void kvm_reset_vcpu(void *opaque) { - CPUState *env = opaque; + CPUArchState *env = opaque; kvm_arch_reset_vcpu(env); } @@ -200,7 +200,7 @@ int kvm_pit_in_kernel(void) return kvm_state->pit_in_kernel; } -int kvm_init_vcpu(CPUState *env) +int kvm_init_vcpu(CPUArchState *env) { KVMState *s = kvm_state; long mmap_size; @@ -830,7 +830,7 @@ static MemoryListener kvm_memory_listener = { .priority = 10, }; -static void kvm_handle_interrupt(CPUState *env, int mask) +static void kvm_handle_interrupt(CPUArchState *env, int mask) { env->interrupt_request |= mask; @@ -1135,7 +1135,7 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size, } } -static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run) +static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run) { fprintf(stderr, "KVM internal error."); if 
(kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { @@ -1190,7 +1190,7 @@ void kvm_flush_coalesced_mmio_buffer(void) static void do_kvm_cpu_synchronize_state(void *_env) { - CPUState *env = _env; + CPUArchState *env = _env; if (!env->kvm_vcpu_dirty) { kvm_arch_get_registers(env); @@ -1198,26 +1198,26 @@ static void do_kvm_cpu_synchronize_state(void *_env) } } -void kvm_cpu_synchronize_state(CPUState *env) +void kvm_cpu_synchronize_state(CPUArchState *env) { if (!env->kvm_vcpu_dirty) { run_on_cpu(env, do_kvm_cpu_synchronize_state, env); } } -void kvm_cpu_synchronize_post_reset(CPUState *env) +void kvm_cpu_synchronize_post_reset(CPUArchState *env) { kvm_arch_put_registers(env, KVM_PUT_RESET_STATE); env->kvm_vcpu_dirty = 0; } -void kvm_cpu_synchronize_post_init(CPUState *env) +void kvm_cpu_synchronize_post_init(CPUArchState *env) { kvm_arch_put_registers(env, KVM_PUT_FULL_STATE); env->kvm_vcpu_dirty = 0; } -int kvm_cpu_exec(CPUState *env) +int kvm_cpu_exec(CPUArchState *env) { struct kvm_run *run = env->kvm_run; int ret, run_ret; @@ -1350,7 +1350,7 @@ int kvm_vm_ioctl(KVMState *s, int type, ...) return ret; } -int kvm_vcpu_ioctl(CPUState *env, int type, ...) +int kvm_vcpu_ioctl(CPUArchState *env, int type, ...) { int ret; void *arg; @@ -1439,7 +1439,7 @@ void kvm_setup_guest_memory(void *start, size_t size) } #ifdef KVM_CAP_SET_GUEST_DEBUG -struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, +struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env, target_ulong pc) { struct kvm_sw_breakpoint *bp; @@ -1452,26 +1452,26 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, return NULL; } -int kvm_sw_breakpoints_active(CPUState *env) +int kvm_sw_breakpoints_active(CPUArchState *env) { return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints); } struct kvm_set_guest_debug_data { struct kvm_guest_debug dbg; - CPUState *env; + CPUArchState *env; int err; }; static void kvm_invoke_set_guest_debug(void *data) { struct kvm_set_guest_debug_data *dbg_data = data; - CPUState *env = dbg_data->env; + CPUArchState *env = dbg_data->env; dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg); } -int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) +int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) { struct kvm_set_guest_debug_data data; @@ -1487,11 +1487,11 @@ int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) return data.err; } -int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { struct kvm_sw_breakpoint *bp; - CPUState *env; + CPUArchState *env; int err; if (type == GDB_BREAKPOINT_SW) { @@ -1532,11 +1532,11 @@ int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, return 0; } -int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { struct kvm_sw_breakpoint *bp; - CPUState *env; + CPUArchState *env; int err; if (type == GDB_BREAKPOINT_SW) { @@ -1573,11 +1573,11 @@ int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, return 0; } -void kvm_remove_all_breakpoints(CPUState *current_env) +void kvm_remove_all_breakpoints(CPUArchState *current_env) { struct kvm_sw_breakpoint *bp, *next; KVMState *s = current_env->kvm_state; - CPUState *env; + CPUArchState *env; QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { if 
(kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) { @@ -1598,29 +1598,29 @@ void kvm_remove_all_breakpoints(CPUState *current_env) #else /* !KVM_CAP_SET_GUEST_DEBUG */ -int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) +int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) { return -EINVAL; } -int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -void kvm_remove_all_breakpoints(CPUState *current_env) +void kvm_remove_all_breakpoints(CPUArchState *current_env) { } #endif /* !KVM_CAP_SET_GUEST_DEBUG */ -int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset) +int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset) { struct kvm_signal_mask *sigmask; int r; @@ -1690,7 +1690,7 @@ int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign) return 0; } -int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr) +int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr) { return kvm_arch_on_sigbus_vcpu(env, code, addr); } @@ -22,7 +22,7 @@ int kvm_pit_in_kernel(void) } -int kvm_init_vcpu(CPUState *env) +int kvm_init_vcpu(CPUArchState *env) { return -ENOSYS; } @@ -46,19 +46,19 @@ void kvm_flush_coalesced_mmio_buffer(void) { } -void kvm_cpu_synchronize_state(CPUState *env) +void kvm_cpu_synchronize_state(CPUArchState *env) { } -void kvm_cpu_synchronize_post_reset(CPUState *env) +void kvm_cpu_synchronize_post_reset(CPUArchState *env) { } -void kvm_cpu_synchronize_post_init(CPUState *env) +void kvm_cpu_synchronize_post_init(CPUArchState *env) { } -int kvm_cpu_exec(CPUState *env) +int kvm_cpu_exec(CPUArchState *env) { abort (); } @@ -87,29 +87,29 @@ void kvm_setup_guest_memory(void *start, size_t size) { } -int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap) +int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) { return -ENOSYS; } -int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -void kvm_remove_all_breakpoints(CPUState *current_env) +void kvm_remove_all_breakpoints(CPUArchState *current_env) { } #ifndef _WIN32 -int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset) +int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset) { abort(); } @@ -125,7 +125,7 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign) return -ENOSYS; } -int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr) +int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr) { return 1; } @@ -61,9 +61,9 @@ int kvm_has_gsi_routing(void); int kvm_allows_irq0_override(void); #ifdef NEED_CPU_H -int kvm_init_vcpu(CPUState *env); +int kvm_init_vcpu(CPUArchState *env); -int kvm_cpu_exec(CPUState *env); +int kvm_cpu_exec(CPUArchState *env); #if !defined(CONFIG_USER_ONLY) void kvm_setup_guest_memory(void *start, size_t size); @@ -73,19 +73,19 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t 
size); void kvm_flush_coalesced_mmio_buffer(void); #endif -int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type); -int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, target_ulong len, int type); -void kvm_remove_all_breakpoints(CPUState *current_env); -int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap); +void kvm_remove_all_breakpoints(CPUArchState *current_env); +int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap); #ifndef _WIN32 -int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset); +int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset); #endif int kvm_pit_in_kernel(void); -int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr); +int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr); int kvm_on_sigbus(int code, void *addr); /* internal API */ @@ -98,20 +98,20 @@ int kvm_ioctl(KVMState *s, int type, ...); int kvm_vm_ioctl(KVMState *s, int type, ...); -int kvm_vcpu_ioctl(CPUState *env, int type, ...); +int kvm_vcpu_ioctl(CPUArchState *env, int type, ...); /* Arch specific hooks */ extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; -void kvm_arch_pre_run(CPUState *env, struct kvm_run *run); -void kvm_arch_post_run(CPUState *env, struct kvm_run *run); +void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run); +void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run); -int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run); +int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run); -int kvm_arch_process_async_events(CPUState *env); +int kvm_arch_process_async_events(CPUArchState *env); -int kvm_arch_get_registers(CPUState *env); +int kvm_arch_get_registers(CPUArchState *env); /* state subset only touched by the VCPU itself during runtime */ #define KVM_PUT_RUNTIME_STATE 1 @@ -120,15 +120,15 @@ int kvm_arch_get_registers(CPUState *env); /* full state set, modified during initialization or on vmload */ #define KVM_PUT_FULL_STATE 3 -int kvm_arch_put_registers(CPUState *env, int level); +int kvm_arch_put_registers(CPUArchState *env, int level); int kvm_arch_init(KVMState *s); -int kvm_arch_init_vcpu(CPUState *env); +int kvm_arch_init_vcpu(CPUArchState *env); -void kvm_arch_reset_vcpu(CPUState *env); +void kvm_arch_reset_vcpu(CPUArchState *env); -int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr); +int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr); int kvm_arch_on_sigbus(int code, void *addr); void kvm_arch_init_irq_routing(KVMState *s); @@ -153,14 +153,14 @@ struct kvm_sw_breakpoint { QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint); -struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, +struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env, target_ulong pc); -int kvm_sw_breakpoints_active(CPUState *env); +int kvm_sw_breakpoints_active(CPUArchState *env); -int kvm_arch_insert_sw_breakpoint(CPUState *current_env, +int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env, struct kvm_sw_breakpoint *bp); -int kvm_arch_remove_sw_breakpoint(CPUState *current_env, +int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env, struct kvm_sw_breakpoint *bp); int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type); @@ -168,35 +168,35 @@ int kvm_arch_remove_hw_breakpoint(target_ulong addr, 
target_ulong len, int type); void kvm_arch_remove_all_hw_breakpoints(void); -void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg); +void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg); -bool kvm_arch_stop_on_emulation_error(CPUState *env); +bool kvm_arch_stop_on_emulation_error(CPUArchState *env); int kvm_check_extension(KVMState *s, unsigned int extension); uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function, uint32_t index, int reg); -void kvm_cpu_synchronize_state(CPUState *env); -void kvm_cpu_synchronize_post_reset(CPUState *env); -void kvm_cpu_synchronize_post_init(CPUState *env); +void kvm_cpu_synchronize_state(CPUArchState *env); +void kvm_cpu_synchronize_post_reset(CPUArchState *env); +void kvm_cpu_synchronize_post_init(CPUArchState *env); /* generic hooks - to be moved/refactored once there are more users */ -static inline void cpu_synchronize_state(CPUState *env) +static inline void cpu_synchronize_state(CPUArchState *env) { if (kvm_enabled()) { kvm_cpu_synchronize_state(env); } } -static inline void cpu_synchronize_post_reset(CPUState *env) +static inline void cpu_synchronize_post_reset(CPUArchState *env) { if (kvm_enabled()) { kvm_cpu_synchronize_post_reset(env); } } -static inline void cpu_synchronize_post_init(CPUState *env) +static inline void cpu_synchronize_post_init(CPUArchState *env) { if (kvm_enabled()) { kvm_cpu_synchronize_post_init(env); diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 48e3232..e502b39 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -1044,7 +1044,7 @@ static inline void bswap_sym(struct elf_sym *sym) { } #endif #ifdef USE_ELF_CORE_DUMP -static int elf_core_dump(int, const CPUState *); +static int elf_core_dump(int, const CPUArchState *); #endif /* USE_ELF_CORE_DUMP */ static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias); @@ -1930,7 +1930,7 @@ int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs, * from given cpu into just specified register set. 
Prototype is: * * static void elf_core_copy_regs(taret_elf_gregset_t *regs, - * const CPUState *env); + * const CPUArchState *env); * * Parameters: * regs - copy register values into here (allocated and zeroed by caller) @@ -2054,8 +2054,8 @@ static void fill_auxv_note(struct memelfnote *, const TaskState *); static void fill_elf_note_phdr(struct elf_phdr *, int, off_t); static size_t note_size(const struct memelfnote *); static void free_note_info(struct elf_note_info *); -static int fill_note_info(struct elf_note_info *, long, const CPUState *); -static void fill_thread_info(struct elf_note_info *, const CPUState *); +static int fill_note_info(struct elf_note_info *, long, const CPUArchState *); +static void fill_thread_info(struct elf_note_info *, const CPUArchState *); static int core_dump_filename(const TaskState *, char *, size_t); static int dump_write(int, const void *, size_t); @@ -2448,7 +2448,7 @@ static int write_note(struct memelfnote *men, int fd) return (0); } -static void fill_thread_info(struct elf_note_info *info, const CPUState *env) +static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) { TaskState *ts = (TaskState *)env->opaque; struct elf_thread_status *ets; @@ -2466,10 +2466,10 @@ static void fill_thread_info(struct elf_note_info *info, const CPUState *env) } static int fill_note_info(struct elf_note_info *info, - long signr, const CPUState *env) + long signr, const CPUArchState *env) { #define NUMNOTES 3 - CPUState *cpu = NULL; + CPUArchState *cpu = NULL; TaskState *ts = (TaskState *)env->opaque; int i; @@ -2595,7 +2595,7 @@ static int write_note_info(struct elf_note_info *info, int fd) * handler (provided that target process haven't registered * handler for that) that does the dump when signal is received. */ -static int elf_core_dump(int signr, const CPUState *env) +static int elf_core_dump(int signr, const CPUArchState *env) { const TaskState *ts = (const TaskState *)env->opaque; struct vm_area_struct *vma = NULL; diff --git a/linux-user/main.c b/linux-user/main.c index 3b48882..962677e 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -146,7 +146,7 @@ static inline void exclusive_idle(void) Must only be called from outside cpu_arm_exec. */ static inline void start_exclusive(void) { - CPUState *other; + CPUArchState *other; pthread_mutex_lock(&exclusive_lock); exclusive_idle(); @@ -172,7 +172,7 @@ static inline void end_exclusive(void) } /* Wait for exclusive ops to finish, and begin cpu execution. */ -static inline void cpu_exec_start(CPUState *env) +static inline void cpu_exec_start(CPUArchState *env) { pthread_mutex_lock(&exclusive_lock); exclusive_idle(); @@ -181,7 +181,7 @@ static inline void cpu_exec_start(CPUState *env) } /* Mark cpu as not executing, and release pending exclusive ops. */ -static inline void cpu_exec_end(CPUState *env) +static inline void cpu_exec_end(CPUArchState *env) { pthread_mutex_lock(&exclusive_lock); env->running = 0; @@ -206,11 +206,11 @@ void cpu_list_unlock(void) } #else /* if !CONFIG_USE_NPTL */ /* These are no-ops because we are not threadsafe. 
*/ -static inline void cpu_exec_start(CPUState *env) +static inline void cpu_exec_start(CPUArchState *env) { } -static inline void cpu_exec_end(CPUState *env) +static inline void cpu_exec_end(CPUArchState *env) { } @@ -2888,7 +2888,7 @@ void cpu_loop(CPUS390XState *env) #endif /* TARGET_S390X */ -THREAD CPUState *thread_env; +THREAD CPUArchState *thread_env; void task_settid(TaskState *ts) { @@ -3277,7 +3277,7 @@ int main(int argc, char **argv, char **envp) struct image_info info1, *info = &info1; struct linux_binprm bprm; TaskState *ts; - CPUState *env; + CPUArchState *env; int optind; char **target_environ, **wrk; char **target_argv; diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 308dbc0..6889567 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -171,7 +171,7 @@ struct linux_binprm { char **argv; char **envp; char * filename; /* Name of binary */ - int (*core_dump)(int, const CPUState *); /* coredump routine */ + int (*core_dump)(int, const CPUArchState *); /* coredump routine */ }; void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); @@ -196,8 +196,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8); void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2); -extern THREAD CPUState *thread_env; -void cpu_loop(CPUState *env); +extern THREAD CPUArchState *thread_env; +void cpu_loop(CPUArchState *env); char *target_strerror(int err); int get_osversion(void); void fork_start(void); @@ -219,15 +219,15 @@ void print_syscall_ret(int num, abi_long arg1); extern int do_strace; /* signal.c */ -void process_pending_signals(CPUState *cpu_env); +void process_pending_signals(CPUArchState *cpu_env); void signal_init(void); -int queue_signal(CPUState *env, int sig, target_siginfo_t *info); +int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info); void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); int target_to_host_signal(int sig); int host_to_target_signal(int sig); -long do_sigreturn(CPUState *env); -long do_rt_sigreturn(CPUState *env); +long do_sigreturn(CPUArchState *env); +long do_rt_sigreturn(CPUArchState *env); abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); #ifdef TARGET_I386 diff --git a/linux-user/signal.c b/linux-user/signal.c index f44f78e..fca51e2 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -347,7 +347,7 @@ void signal_init(void) /* signal queue handling */ -static inline struct sigqueue *alloc_sigqueue(CPUState *env) +static inline struct sigqueue *alloc_sigqueue(CPUArchState *env) { TaskState *ts = env->opaque; struct sigqueue *q = ts->first_free; @@ -357,7 +357,7 @@ static inline struct sigqueue *alloc_sigqueue(CPUState *env) return q; } -static inline void free_sigqueue(CPUState *env, struct sigqueue *q) +static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q) { TaskState *ts = env->opaque; q->next = ts->first_free; @@ -415,7 +415,7 @@ static void QEMU_NORETURN force_sig(int target_sig) /* queue a signal so that it will be send to the virtual CPU as soon as possible */ -int queue_signal(CPUState *env, int sig, target_siginfo_t *info) +int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info) { TaskState *ts = env->opaque; struct emulated_sigtable *k; @@ -5214,25 +5214,25 @@ long do_rt_sigreturn(CPUAlphaState *env) #else static void setup_frame(int sig, struct target_sigaction *ka, - 
target_sigset_t *set, CPUState *env) + target_sigset_t *set, CPUArchState *env) { fprintf(stderr, "setup_frame: not implemented\n"); } static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, - target_sigset_t *set, CPUState *env) + target_sigset_t *set, CPUArchState *env) { fprintf(stderr, "setup_rt_frame: not implemented\n"); } -long do_sigreturn(CPUState *env) +long do_sigreturn(CPUArchState *env) { fprintf(stderr, "do_sigreturn: not implemented\n"); return -TARGET_ENOSYS; } -long do_rt_sigreturn(CPUState *env) +long do_rt_sigreturn(CPUArchState *env) { fprintf(stderr, "do_rt_sigreturn: not implemented\n"); return -TARGET_ENOSYS; @@ -5240,7 +5240,7 @@ long do_rt_sigreturn(CPUState *env) #endif -void process_pending_signals(CPUState *cpu_env) +void process_pending_signals(CPUArchState *cpu_env) { int sig; abi_ulong handler; diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 29888bd..9f5e53a 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -3955,7 +3955,7 @@ static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; typedef struct { - CPUState *env; + CPUArchState *env; pthread_mutex_t mutex; pthread_cond_t cond; pthread_t thread; @@ -3968,7 +3968,7 @@ typedef struct { static void *clone_func(void *arg) { new_thread_info *info = arg; - CPUState *env; + CPUArchState *env; TaskState *ts; env = info->env; @@ -3998,7 +3998,7 @@ static void *clone_func(void *arg) static int clone_func(void *arg) { - CPUState *env = arg; + CPUArchState *env = arg; cpu_loop(env); /* never exits */ return 0; @@ -4007,13 +4007,13 @@ static int clone_func(void *arg) /* do_fork() Must return host values and target errnos (unlike most do_*() functions). */ -static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp, +static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, abi_ulong parent_tidptr, target_ulong newtls, abi_ulong child_tidptr) { int ret; TaskState *ts; - CPUState *new_env; + CPUArchState *new_env; #if defined(CONFIG_USE_NPTL) unsigned int nptl_flags; sigset_t sigmask; @@ -4640,7 +4640,7 @@ int get_osversion(void) static int open_self_maps(void *cpu_env, int fd) { - TaskState *ts = ((CPUState *)cpu_env)->opaque; + TaskState *ts = ((CPUArchState *)cpu_env)->opaque; dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", (unsigned long long)ts->info->stack_limit, @@ -4653,7 +4653,7 @@ static int open_self_maps(void *cpu_env, int fd) static int open_self_stat(void *cpu_env, int fd) { - TaskState *ts = ((CPUState *)cpu_env)->opaque; + TaskState *ts = ((CPUArchState *)cpu_env)->opaque; abi_ulong start_stack = ts->info->start_stack; int i; @@ -4678,7 +4678,7 @@ static int open_self_stat(void *cpu_env, int fd) static int open_self_auxv(void *cpu_env, int fd) { - TaskState *ts = ((CPUState *)cpu_env)->opaque; + TaskState *ts = ((CPUArchState *)cpu_env)->opaque; abi_ulong auxv = ts->info->saved_auxv; abi_ulong len = ts->info->auxv_len; char *ptr; @@ -4784,13 +4784,13 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, be disabling signals. 
*/ if (first_cpu->next_cpu) { TaskState *ts; - CPUState **lastp; - CPUState *p; + CPUArchState **lastp; + CPUArchState *p; cpu_list_lock(); lastp = &first_cpu; p = first_cpu; - while (p && p != (CPUState *)cpu_env) { + while (p && p != (CPUArchState *)cpu_env) { lastp = &p->next_cpu; p = p->next_cpu; } @@ -4801,7 +4801,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, /* Remove the CPU from the list. */ *lastp = p->next_cpu; cpu_list_unlock(); - ts = ((CPUState *)cpu_env)->opaque; + ts = ((CPUArchState *)cpu_env)->opaque; if (ts->child_tidptr) { put_user_u32(0, ts->child_tidptr); sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, @@ -6091,7 +6091,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, break; case TARGET_NR_mprotect: { - TaskState *ts = ((CPUState *)cpu_env)->opaque; + TaskState *ts = ((CPUArchState *)cpu_env)->opaque; /* Special hack to detect libc making the stack executable. */ if ((arg3 & PROT_GROWSDOWN) && arg1 >= ts->info->stack_limit @@ -7076,7 +7076,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ defined(TARGET_M68K) || defined(TARGET_S390X) - ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); + ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); break; #else goto unimplemented; @@ -156,7 +156,7 @@ struct Monitor { int outbuf_index; ReadLineState *rs; MonitorControl *mc; - CPUState *mon_cpu; + CPUArchState *mon_cpu; BlockDriverCompletionFunc *password_completion_cb; void *password_opaque; #ifdef CONFIG_DEBUG_MONITOR @@ -742,7 +742,7 @@ CommandInfoList *qmp_query_commands(Error **errp) /* set the current CPU defined by the user */ int monitor_set_cpu(int cpu_index) { - CPUState *env; + CPUArchState *env; for(env = first_cpu; env != NULL; env = env->next_cpu) { if (env->cpu_index == cpu_index) { @@ -753,7 +753,7 @@ int monitor_set_cpu(int cpu_index) return -1; } -static CPUState *mon_get_cpu(void) +static CPUArchState *mon_get_cpu(void) { if (!cur_mon->mon_cpu) { monitor_set_cpu(0); @@ -769,7 +769,7 @@ int monitor_get_cpu_index(void) static void do_info_registers(Monitor *mon) { - CPUState *env; + CPUArchState *env; env = mon_get_cpu(); #ifdef TARGET_I386 cpu_dump_state(env, (FILE *)mon, monitor_fprintf, @@ -806,7 +806,7 @@ static void do_info_history(Monitor *mon) /* XXX: not implemented in other targets */ static void do_info_cpu_stats(Monitor *mon) { - CPUState *env; + CPUArchState *env; env = mon_get_cpu(); cpu_dump_statistics(env, (FILE *)mon, &monitor_fprintf, 0); @@ -987,7 +987,7 @@ static void monitor_printc(Monitor *mon, int c) static void memory_dump(Monitor *mon, int count, int format, int wsize, target_phys_addr_t addr, int is_physical) { - CPUState *env; + CPUArchState *env; int l, line_size, i, max_digits, len; uint8_t buf[16]; uint64_t v; @@ -1547,7 +1547,7 @@ static void print_pte(Monitor *mon, target_phys_addr_t addr, pte & PG_RW_MASK ? 
'W' : '-'); } -static void tlb_info_32(Monitor *mon, CPUState *env) +static void tlb_info_32(Monitor *mon, CPUArchState *env) { unsigned int l1, l2; uint32_t pgd, pde, pte; @@ -1575,7 +1575,7 @@ static void tlb_info_32(Monitor *mon, CPUState *env) } } -static void tlb_info_pae32(Monitor *mon, CPUState *env) +static void tlb_info_pae32(Monitor *mon, CPUArchState *env) { unsigned int l1, l2, l3; uint64_t pdpe, pde, pte; @@ -1615,7 +1615,7 @@ static void tlb_info_pae32(Monitor *mon, CPUState *env) } #ifdef TARGET_X86_64 -static void tlb_info_64(Monitor *mon, CPUState *env) +static void tlb_info_64(Monitor *mon, CPUArchState *env) { uint64_t l1, l2, l3, l4; uint64_t pml4e, pdpe, pde, pte; @@ -1674,7 +1674,7 @@ static void tlb_info_64(Monitor *mon, CPUState *env) static void tlb_info(Monitor *mon) { - CPUState *env; + CPUArchState *env; env = mon_get_cpu(); @@ -1719,7 +1719,7 @@ static void mem_print(Monitor *mon, target_phys_addr_t *pstart, } } -static void mem_info_32(Monitor *mon, CPUState *env) +static void mem_info_32(Monitor *mon, CPUArchState *env) { unsigned int l1, l2; int prot, last_prot; @@ -1760,7 +1760,7 @@ static void mem_info_32(Monitor *mon, CPUState *env) mem_print(mon, &start, &last_prot, (target_phys_addr_t)1 << 32, 0); } -static void mem_info_pae32(Monitor *mon, CPUState *env) +static void mem_info_pae32(Monitor *mon, CPUArchState *env) { unsigned int l1, l2, l3; int prot, last_prot; @@ -1817,7 +1817,7 @@ static void mem_info_pae32(Monitor *mon, CPUState *env) #ifdef TARGET_X86_64 -static void mem_info_64(Monitor *mon, CPUState *env) +static void mem_info_64(Monitor *mon, CPUArchState *env) { int prot, last_prot; uint64_t l1, l2, l3, l4; @@ -1897,7 +1897,7 @@ static void mem_info_64(Monitor *mon, CPUState *env) static void mem_info(Monitor *mon) { - CPUState *env; + CPUArchState *env; env = mon_get_cpu(); @@ -1936,7 +1936,7 @@ static void print_tlb(Monitor *mon, int idx, tlb_t *tlb) static void tlb_info(Monitor *mon) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); int i; monitor_printf (mon, "ITLB:\n"); @@ -1952,7 +1952,7 @@ static void tlb_info(Monitor *mon) #if defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_XTENSA) static void tlb_info(Monitor *mon) { - CPUState *env1 = mon_get_cpu(); + CPUArchState *env1 = mon_get_cpu(); dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1); } @@ -1966,7 +1966,7 @@ static void do_info_mtree(Monitor *mon) static void do_info_numa(Monitor *mon) { int i; - CPUState *env; + CPUArchState *env; monitor_printf(mon, "%d nodes\n", nb_numa_nodes); for (i = 0; i < nb_numa_nodes; i++) { @@ -2173,7 +2173,7 @@ static void do_acl_remove(Monitor *mon, const QDict *qdict) #if defined(TARGET_I386) static void do_inject_mce(Monitor *mon, const QDict *qdict) { - CPUState *cenv; + CPUArchState *cenv; int cpu_index = qdict_get_int(qdict, "cpu_index"); int bank = qdict_get_int(qdict, "bank"); uint64_t status = qdict_get_int(qdict, "status"); @@ -2625,7 +2625,7 @@ typedef struct MonitorDef { #if defined(TARGET_I386) static target_long monitor_get_pc (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return env->eip + env->segs[R_CS].base; } #endif @@ -2633,7 +2633,7 @@ static target_long monitor_get_pc (const struct MonitorDef *md, int val) #if defined(TARGET_PPC) static target_long monitor_get_ccr (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); unsigned int u; int i; @@ -2646,31 +2646,31 @@ 
static target_long monitor_get_ccr (const struct MonitorDef *md, int val) static target_long monitor_get_msr (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return env->msr; } static target_long monitor_get_xer (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return env->xer; } static target_long monitor_get_decr (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return cpu_ppc_load_decr(env); } static target_long monitor_get_tbu (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return cpu_ppc_load_tbu(env); } static target_long monitor_get_tbl (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return cpu_ppc_load_tbl(env); } #endif @@ -2679,7 +2679,7 @@ static target_long monitor_get_tbl (const struct MonitorDef *md, int val) #ifndef TARGET_SPARC64 static target_long monitor_get_psr (const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return cpu_get_psr(env); } @@ -2687,7 +2687,7 @@ static target_long monitor_get_psr (const struct MonitorDef *md, int val) static target_long monitor_get_reg(const struct MonitorDef *md, int val) { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); return env->regwptr[val]; } #endif @@ -3019,7 +3019,7 @@ static int get_monitor_def(target_long *pval, const char *name) if (md->get_value) { *pval = md->get_value(md, md->offset); } else { - CPUState *env = mon_get_cpu(); + CPUArchState *env = mon_get_cpu(); ptr = (uint8_t *)env + md->offset; switch(md->type) { case MD_I32: @@ -34,7 +34,7 @@ #pragma GCC poison TARGET_PAGE_BITS #pragma GCC poison TARGET_PAGE_ALIGN -#pragma GCC poison CPUState +#pragma GCC poison CPUArchState #pragma GCC poison env #pragma GCC poison lduw_phys diff --git a/softmmu-semi.h b/softmmu-semi.h index 86a9f8a..648cb95 100644 --- a/softmmu-semi.h +++ b/softmmu-semi.h @@ -7,14 +7,14 @@ * This code is licensed under the GPL */ -static inline uint32_t softmmu_tget32(CPUState *env, uint32_t addr) +static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr) { uint32_t val; cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0); return tswap32(val); } -static inline uint32_t softmmu_tget8(CPUState *env, uint32_t addr) +static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr) { uint8_t val; @@ -26,7 +26,7 @@ static inline uint32_t softmmu_tget8(CPUState *env, uint32_t addr) #define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; }) #define get_user_ual(arg, p) get_user_u32(arg, p) -static inline void softmmu_tput32(CPUState *env, uint32_t addr, uint32_t val) +static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val) { val = tswap32(val); cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1); @@ -34,7 +34,7 @@ static inline void softmmu_tput32(CPUState *env, uint32_t addr, uint32_t val) #define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; }) #define put_user_ual(arg, p) put_user_u32(arg, p) -static void *softmmu_lock_user(CPUState *env, uint32_t addr, uint32_t len, +static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len, int copy) { uint8_t *p; @@ -45,7 +45,7 @@ static void *softmmu_lock_user(CPUState *env, uint32_t addr, uint32_t len, return p; } #define lock_user(type, p, len, 
copy) softmmu_lock_user(env, p, len, copy) -static char *softmmu_lock_user_string(CPUState *env, uint32_t addr) +static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr) { char *p; char *s; @@ -60,7 +60,7 @@ static char *softmmu_lock_user_string(CPUState *env, uint32_t addr) return s; } #define lock_user_string(p) softmmu_lock_user_string(env, p) -static void softmmu_unlock_user(CPUState *env, void *p, target_ulong addr, +static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr, target_ulong len) { if (len) diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h index ecc2a35..48c0fdc 100644 --- a/target-alpha/cpu.h +++ b/target-alpha/cpu.h @@ -25,7 +25,7 @@ #define TARGET_LONG_BITS 64 -#define CPUState struct CPUAlphaState +#define CPUArchState struct CPUAlphaState #include "cpu-defs.h" diff --git a/target-arm/cpu.h b/target-arm/cpu.h index 2bbb5d1..26c114b 100644 --- a/target-arm/cpu.h +++ b/target-arm/cpu.h @@ -23,7 +23,7 @@ #define ELF_MACHINE EM_ARM -#define CPUState struct CPUARMState +#define CPUArchState struct CPUARMState #include "config.h" #include "qemu-common.h" diff --git a/target-cris/cpu.h b/target-cris/cpu.h index f38393a..31899c2 100644 --- a/target-cris/cpu.h +++ b/target-cris/cpu.h @@ -25,7 +25,7 @@ #define TARGET_LONG_BITS 32 -#define CPUState struct CPUCRISState +#define CPUArchState struct CPUCRISState #include "cpu-defs.h" diff --git a/target-i386/cpu.h b/target-i386/cpu.h index 6e26d21..a1ed3e7 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -42,7 +42,7 @@ #define ELF_MACHINE EM_386 #endif -#define CPUState struct CPUX86State +#define CPUArchState struct CPUX86State #include "cpu-defs.h" diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h index 684b2fa..0902a24 100644 --- a/target-lm32/cpu.h +++ b/target-lm32/cpu.h @@ -22,7 +22,7 @@ #define TARGET_LONG_BITS 32 -#define CPUState struct CPULM32State +#define CPUArchState struct CPULM32State #include "config.h" #include "qemu-common.h" diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h index 2c83b89..6696e30 100644 --- a/target-m68k/cpu.h +++ b/target-m68k/cpu.h @@ -22,7 +22,7 @@ #define TARGET_LONG_BITS 32 -#define CPUState struct CPUM68KState +#define CPUArchState struct CPUM68KState #include "config.h" #include "qemu-common.h" diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h index 6ae5649..3b52421 100644 --- a/target-microblaze/cpu.h +++ b/target-microblaze/cpu.h @@ -24,7 +24,7 @@ #define TARGET_LONG_BITS 32 -#define CPUState struct CPUMBState +#define CPUArchState struct CPUMBState #include "cpu-defs.h" #include "softfloat.h" diff --git a/target-mips/cpu.h b/target-mips/cpu.h index 94381ec..7430aa5 100644 --- a/target-mips/cpu.h +++ b/target-mips/cpu.h @@ -7,7 +7,7 @@ #define ELF_MACHINE EM_MIPS -#define CPUState struct CPUMIPSState +#define CPUArchState struct CPUMIPSState #include "config.h" #include "qemu-common.h" diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h index 3508d8a..ad09cbe 100644 --- a/target-ppc/cpu.h +++ b/target-ppc/cpu.h @@ -71,7 +71,7 @@ #endif /* defined (TARGET_PPC64) */ -#define CPUState struct CPUPPCState +#define CPUArchState struct CPUPPCState #include "cpu-defs.h" diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index af6cc4e..ea849fc 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -26,7 +26,7 @@ #define ELF_MACHINE EM_S390 -#define CPUState struct CPUS390XState +#define CPUArchState struct CPUS390XState #include "cpu-defs.h" #define TARGET_PAGE_BITS 12 diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h index 
b45e54f..965536d 100644 --- a/target-sh4/cpu.h +++ b/target-sh4/cpu.h @@ -37,7 +37,7 @@ #define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R) #define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R) -#define CPUState struct CPUSH4State +#define CPUArchState struct CPUSH4State #include "cpu-defs.h" diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h index 2c2cea7..86f9de6 100644 --- a/target-sparc/cpu.h +++ b/target-sparc/cpu.h @@ -23,7 +23,7 @@ # endif #endif -#define CPUState struct CPUSPARCState +#define CPUArchState struct CPUSPARCState #include "cpu-defs.h" diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h index 171f0a9..a3f8589 100644 --- a/target-unicore32/cpu.h +++ b/target-unicore32/cpu.h @@ -18,7 +18,7 @@ #define ELF_MACHINE EM_UNICORE32 -#define CPUState struct CPUUniCore32State +#define CPUArchState struct CPUUniCore32State #include "config.h" #include "qemu-common.h" diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h index b7723ca..a7bcf52 100644 --- a/target-xtensa/cpu.h +++ b/target-xtensa/cpu.h @@ -31,7 +31,7 @@ #define TARGET_LONG_BITS 32 #define ELF_MACHINE EM_XTENSA -#define CPUState struct CPUXtensaState +#define CPUArchState struct CPUXtensaState #include "config.h" #include "qemu-common.h" diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c index 5b233f5..5af21b3 100644 --- a/tcg/arm/tcg-target.c +++ b/tcg/arm/tcg-target.c @@ -990,10 +990,10 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); /* In the - * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))] + * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_read))] * below, the offset is likely to exceed 12 bits if mem_index != 0 and * not exceed otherwise, so use an - * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table) + * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table) * before. */ if (mem_index) @@ -1001,7 +1001,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) (mem_index << (TLB_SHIFT & 1)) | ((16 - (TLB_SHIFT >> 1)) << 8)); tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addr_read)); + offsetof(CPUArchState, tlb_table[0][0].addr_read)); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1, TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); /* Check alignment. */ @@ -1012,12 +1012,12 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) /* XXX: possibly we could use a block data load or writeback in * the first access. 
*/ tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addr_read) + 4); + offsetof(CPUArchState, tlb_table[0][0].addr_read) + 4); tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0)); # endif tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addend)); + offsetof(CPUArchState, tlb_table[0][0].addend)); switch (opc) { case 0: @@ -1210,10 +1210,10 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); /* In the - * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))] + * ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_write))] * below, the offset is likely to exceed 12 bits if mem_index != 0 and * not exceed otherwise, so use an - * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table) + * add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table) * before. */ if (mem_index) @@ -1221,7 +1221,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) (mem_index << (TLB_SHIFT & 1)) | ((16 - (TLB_SHIFT >> 1)) << 8)); tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addr_write)); + offsetof(CPUArchState, tlb_table[0][0].addr_write)); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1, TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); /* Check alignment. */ @@ -1232,12 +1232,12 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) /* XXX: possibly we could use a block data load or writeback in * the first access. */ tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addr_write) + 4); + offsetof(CPUArchState, tlb_table[0][0].addr_write) + 4); tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0)); # endif tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, - offsetof(CPUState, tlb_table[0][0].addend)); + offsetof(CPUArchState, tlb_table[0][0].addend)); switch (opc) { case 0: @@ -1797,7 +1797,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC); tcg_add_target_add_op_defs(arm_op_defs); - tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf), CPU_TEMP_BUF_NLONGS * sizeof(long)); } diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c index 71f4a8a..c5a3730 100644 --- a/tcg/hppa/tcg-target.c +++ b/tcg/hppa/tcg-target.c @@ -1040,13 +1040,13 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) lab1 = gen_new_label(); lab2 = gen_new_label(); - offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read); + offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read); offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg, opc & 3, lab1, offset); /* TLB Hit. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? 
TCG_REG_R1 : TCG_REG_R25), - offsetof(CPUState, tlb_table[mem_index][0].addend) - offset); + offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset); tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc); tcg_out_branch(s, lab2, 1); @@ -1155,13 +1155,13 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) lab1 = gen_new_label(); lab2 = gen_new_label(); - offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write); + offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg, opc, lab1, offset); /* TLB Hit. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25), - offsetof(CPUState, tlb_table[mem_index][0].addend) - offset); + offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset); /* There are no indexed stores, so we must do this addition explitly. Careful to avoid R20, which is used for the bswaps to follow. */ diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c index 1dbe240..fafd900 100644 --- a/tcg/i386/tcg-target.c +++ b/tcg/i386/tcg-target.c @@ -1031,7 +1031,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0); tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0, - offsetof(CPUState, tlb_table[mem_index][0]) + offsetof(CPUArchState, tlb_table[mem_index][0]) + which); /* cmp 0(r1), r0 */ diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c index e3de79f..f90252a 100644 --- a/tcg/ia64/tcg-target.c +++ b/tcg/ia64/tcg-target.c @@ -1479,8 +1479,8 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) /* Read the TLB entry */ tcg_out_qemu_tlb(s, addr_reg, s_bits, - offsetof(CPUState, tlb_table[mem_index][0].addr_read), - offsetof(CPUState, tlb_table[mem_index][0].addend)); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_read), + offsetof(CPUArchState, tlb_table[mem_index][0].addend)); /* P6 is the fast path, and P7 the slow path */ tcg_out_bundle(s, mLX, @@ -1570,8 +1570,8 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) #endif tcg_out_qemu_tlb(s, addr_reg, opc, - offsetof(CPUState, tlb_table[mem_index][0].addr_write), - offsetof(CPUState, tlb_table[mem_index][0].addend)); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_write), + offsetof(CPUArchState, tlb_table[mem_index][0].addend)); /* P6 is the fast path, and P7 the slow path */ tcg_out_bundle(s, mLX, @@ -2368,6 +2368,6 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6); tcg_add_target_add_op_defs(ia64_op_defs); - tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf), CPU_TEMP_BUF_NLONGS * sizeof(long)); } diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c index c5c3282..c6aa5bc 100644 --- a/tcg/mips/tcg-target.c +++ b/tcg/mips/tcg-target.c @@ -827,7 +827,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_meml); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_meml); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); tcg_out_opc_reg(s, 
OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl); @@ -837,7 +837,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, tcg_out_nop(s); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_memh); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + addr_memh); label1_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT); @@ -893,7 +893,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addend)); + offsetof(CPUArchState, tlb_table[mem_index][0].addend)); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl); #else if (GUEST_BASE == (int16_t)GUEST_BASE) { @@ -1013,7 +1013,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_meml); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_meml); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl); @@ -1023,7 +1023,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, tcg_out_nop(s); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addr_write) + addr_memh); + offsetof(CPUArchState, tlb_table[mem_index][0].addr_write) + addr_memh); label1_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT); @@ -1080,7 +1080,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, - offsetof(CPUState, tlb_table[mem_index][0].addend)); + offsetof(CPUArchState, tlb_table[mem_index][0].addend)); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, addr_regl); #else if (GUEST_BASE == (int16_t)GUEST_BASE) { @@ -1529,6 +1529,6 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ tcg_add_target_add_op_defs(mips_op_defs); - tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf), CPU_TEMP_BUF_NLONGS * sizeof(long)); } diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c index f5d9bf3..6a34cab 100644 --- a/tcg/ppc/tcg-target.c +++ b/tcg/ppc/tcg-target.c @@ -564,7 +564,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc) tcg_out32 (s, (LWZU | RT (r1) | RA (r0) - | offsetof (CPUState, tlb_table[mem_index][0].addr_read) + | offsetof (CPUArchState, tlb_table[mem_index][0].addr_read) ) ); tcg_out32 (s, (RLWINM @@ -760,7 +760,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc) tcg_out32 (s, (LWZU | RT (r1) | RA (r0) - | offsetof (CPUState, tlb_table[mem_index][0].addr_write) + | offsetof (CPUArchState, tlb_table[mem_index][0].addr_write) ) ); tcg_out32 (s, (RLWINM diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c index 4419378..7f723b5 100644 --- a/tcg/ppc64/tcg-target.c +++ b/tcg/ppc64/tcg-target.c @@ -635,7 +635,7 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc) rbase = 0; tcg_out_tlb_read (s, r0, r1, r2, 
addr_reg, s_bits, - offsetof (CPUState, tlb_table[mem_index][0].addr_read)); + offsetof (CPUArchState, tlb_table[mem_index][0].addr_read)); tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L); @@ -782,7 +782,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc) rbase = 0; tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc, - offsetof (CPUState, tlb_table[mem_index][0].addr_write)); + offsetof (CPUArchState, tlb_table[mem_index][0].addr_write)); tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L); diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c index 9317fe8..47ffcc1 100644 --- a/tcg/s390/tcg-target.c +++ b/tcg/s390/tcg-target.c @@ -1439,9 +1439,9 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg, tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); if (is_store) { - ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_write); + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); } else { - ofs = offsetof(CPUState, tlb_table[mem_index][0].addr_read); + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read); } assert(ofs < 0x80000); @@ -1515,7 +1515,7 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg, *(label1_ptr + 1) = ((unsigned long)s->code_ptr - (unsigned long)label1_ptr) >> 1; - ofs = offsetof(CPUState, tlb_table[mem_index][0].addend); + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend); assert(ofs < 0x80000); tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs); @@ -2293,7 +2293,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); tcg_add_target_add_op_defs(s390_op_defs); - tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf), CPU_TEMP_BUF_NLONGS * sizeof(long)); } diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c index 4461fb4..b287122 100644 --- a/tcg/sparc/tcg-target.c +++ b/tcg/sparc/tcg-target.c @@ -776,7 +776,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); /* add arg1, x, arg1 */ - tcg_out_addi(s, arg1, offsetof(CPUState, + tcg_out_addi(s, arg1, offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)); /* add env, arg1, arg1 */ @@ -988,7 +988,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); /* add arg1, x, arg1 */ - tcg_out_addi(s, arg1, offsetof(CPUState, + tcg_out_addi(s, arg1, offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); /* add env, arg1, arg1 */ diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c index fc0880c..bd85073 100644 --- a/tcg/tci/tcg-target.c +++ b/tcg/tci/tcg-target.c @@ -891,7 +891,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_clear(s->reserved_regs); tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); tcg_add_target_add_op_defs(tcg_target_op_defs); - tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf), + tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf), CPU_TEMP_BUF_NLONGS * sizeof(long)); } diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index 81fcc0f..b61e99a 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -154,7 +154,7 @@ typedef enum { void tci_disas(uint8_t opc); -unsigned long tcg_qemu_tb_exec(CPUState *env, uint8_t *tb_ptr); +unsigned long tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr); #define tcg_qemu_tb_exec tcg_qemu_tb_exec static inline void 
flush_icache_range(tcg_target_ulong start, @@ -52,7 +52,7 @@ typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong, /* TCI can optionally use a global register variable for env. */ #if !defined(AREG0) -CPUState *env; +CPUArchState *env; #endif /* Targets which don't use GETPC also don't need tci_tb_ptr @@ -429,7 +429,7 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) } /* Interpret pseudo code in tb. */ -unsigned long tcg_qemu_tb_exec(CPUState *cpustate, uint8_t *tb_ptr) +unsigned long tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr) { unsigned long next_tb = 0; diff --git a/translate-all.c b/translate-all.c index 041c108..8c7d303 100644 --- a/translate-all.c +++ b/translate-all.c @@ -51,7 +51,7 @@ void cpu_gen_init(void) '*gen_code_size_ptr' contains the size of the generated code (host code). */ -int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr) +int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr) { TCGContext *s = &tcg_ctx; uint8_t *gen_code_buf; @@ -109,7 +109,7 @@ int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr) /* The cpu state corresponding to 'searched_pc' is restored. */ int cpu_restore_state(TranslationBlock *tb, - CPUState *env, unsigned long searched_pc) + CPUArchState *env, unsigned long searched_pc) { TCGContext *s = &tcg_ctx; int j; diff --git a/user-exec.c b/user-exec.c index abf6885..cd905ff 100644 --- a/user-exec.c +++ b/user-exec.c @@ -38,7 +38,7 @@ //#define DEBUG_SIGNAL -static void exception_action(CPUState *env1) +static void exception_action(CPUArchState *env1) { #if defined(TARGET_I386) raise_exception_err_env(env1, env1->exception_index, env1->error_code); @@ -50,7 +50,7 @@ static void exception_action(CPUState *env1) /* exit the current TB from a signal handler. The host registers are restored in a state compatible with the CPU emulator */ -void cpu_resume_from_signal(CPUState *env1, void *puc) +void cpu_resume_from_signal(CPUArchState *env1, void *puc) { #ifdef __linux__ struct ucontext *uc = puc; @@ -530,14 +530,14 @@ static MemoryListener xen_memory_listener = { static void xen_reset_vcpu(void *opaque) { - CPUState *env = opaque; + CPUArchState *env = opaque; env->halted = 1; } void xen_vcpu_init(void) { - CPUState *first_cpu; + CPUArchState *first_cpu; if ((first_cpu = qemu_get_cpu(0))) { qemu_register_reset(xen_reset_vcpu, first_cpu);
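
The target-*/cpu.h hunks above show the mechanism behind the rename: CPUArchState, like the old CPUState, is not a single type but a per-target macro aliasing that architecture's env structure, which is why a plain text substitution was sufficient. A minimal self-contained sketch of the pattern; the define line mirrors the target-i386/cpu.h hunk, but the struct body and fields here are illustrative, not QEMU's real layout:

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for one target's env structure (not the real CPUX86State). */
struct CPUX86State {
    unsigned long eip;
    unsigned long regs[8];
};

/* Each target's cpu.h maps the generic name onto its own struct, as in the
 * target-i386/cpu.h hunk above. */
#define CPUArchState struct CPUX86State

/* Code written against the generic name works for whichever target is being
 * compiled, including offsetof() computations on its fields. */
static void show(CPUArchState *env)
{
    printf("eip offset = %zu, eip = 0x%lx\n",
           offsetof(CPUArchState, eip), env->eip);
}

int main(void)
{
    CPUArchState env = { .eip = 0x1000 };
    show(&env);
    return 0;
}

Because the alias is resolved per translation unit, the same target-dependent source compiles once per target against a different concrete env layout.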
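
The poison hunk above swaps the poisoned identifier as well (-#pragma GCC poison CPUState, +#pragma GCC poison CPUArchState): GCC's poison pragma turns any later use of a listed identifier into a hard compile error, which is how the per-target env type name is kept out of target-independent files. A stand-alone illustration of the mechanism, not taken from QEMU:

#include <stdio.h>

/* After this directive, any further appearance of the listed identifier in
 * this translation unit is rejected at compile time. */
#pragma GCC poison CPUState

int main(void)
{
    /* Uncommenting the next line fails with an error along the lines of
     * "attempt to use poisoned CPUState". */
    /* struct CPUState *env; */
    puts("CPUState may no longer be spelled in this file");
    return 0;
}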
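
Most of the tcg/*/tcg-target.c churn above is on the softmmu fast path, where each backend bakes offsets such as offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) and ...addend into the emitted host code, so a guest address can be checked against the TLB relative to the env register (TCG_AREG0). A self-contained sketch of that offset arithmetic, using invented entry and table sizes rather than QEMU's real CPUTLBEntry layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Toy TLB entry and env layout; field names follow the hunks above, but the
 * sizes and counts are invented for the example. */
typedef struct {
    uintptr_t addr_read;
    uintptr_t addr_write;
    uintptr_t addend;
} ToyTLBEntry;

#define NB_MMU_MODES 2
#define CPU_TLB_SIZE 256

struct ToyArchState {
    ToyTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
};
#define CPUArchState struct ToyArchState

int main(void)
{
    int mem_index = 1;

    /* The immediate a backend would emit: the [0][0] offset plus mem_index
     * whole rows, cf. the ARM hunk's comment about adding
     * "mem_index * sizeof *CPUArchState.tlb_table" up front. */
    size_t read_off = offsetof(CPUArchState, tlb_table[0][0].addr_read)
                      + (size_t)mem_index * sizeof(((CPUArchState *)0)->tlb_table[0]);
    size_t addend_off = offsetof(CPUArchState, tlb_table[0][0].addend);

    printf("addr_read offset for mem_index=%d: %zu\n", mem_index, read_off);
    printf("addend offset of entry [0][0]: %zu\n", addend_off);
    return 0;
}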
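
The kvm.h hunk above also retypes the generic cpu_synchronize_state/_post_reset/_post_init inline hooks, which simply forward to the KVM implementations when kvm_enabled() is true so callers need no accelerator #ifdefs. A stand-alone sketch of that guard-and-forward shape, with kvm_enabled() stubbed out and the register transfer reduced to a placeholder:

#include <stdbool.h>
#include <stdio.h>

struct ToyArchState { int kvm_vcpu_dirty; };
#define CPUArchState struct ToyArchState

/* Stub: KVM is simply "off" in this sketch; real code queries the accelerator. */
static bool kvm_enabled(void) { return false; }

static void kvm_cpu_synchronize_state(CPUArchState *env)
{
    /* Placeholder for fetching registers from the kernel. */
    env->kvm_vcpu_dirty = 1;
    puts("registers pulled from KVM");
}

/* Generic hook: callers stay free of accelerator #ifdefs, as in the
 * static inline wrappers shown in the kvm.h hunk. */
static inline void cpu_synchronize_state(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(env);
    }
}

int main(void)
{
    CPUArchState env = { .kvm_vcpu_dirty = 0 };
    cpu_synchronize_state(&env);   /* no-op here since kvm_enabled() is false */
    printf("dirty = %d\n", env.kvm_vcpu_dirty);
    return 0;
}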
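
linux-user and bsd-user keep the currently executing thread's env in THREAD CPUArchState *thread_env, as the main.c and qemu.h hunks above show. A stand-alone sketch of that per-thread pointer, assuming THREAD expands to __thread (treated purely as an assumption here, for NPTL-style builds):

#include <stdio.h>
#include <pthread.h>

struct ToyArchState { int cpu_index; };
#define CPUArchState struct ToyArchState
#define THREAD __thread        /* assumption: QEMU's THREAD macro under NPTL */

THREAD CPUArchState *thread_env;

static void *cpu_thread(void *arg)
{
    thread_env = arg;          /* each emulated CPU thread sees its own copy */
    printf("thread sees cpu %d\n", thread_env->cpu_index);
    return NULL;
}

int main(void)
{
    CPUArchState cpu0 = { .cpu_index = 0 }, cpu1 = { .cpu_index = 1 };
    pthread_t t0, t1;

    pthread_create(&t0, NULL, cpu_thread, &cpu0);
    pthread_create(&t1, NULL, cpu_thread, &cpu1);
    pthread_join(t0, NULL);
    pthread_join(t1, NULL);
    return 0;
}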