Diffstat (limited to 'include/exec/cpu-common.h')
-rw-r--r--  include/exec/cpu-common.h  |  72
1 file changed, 10 insertions(+), 62 deletions(-)
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 3771b21..e0be4ee 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -9,9 +9,7 @@
#define CPU_COMMON_H
#include "exec/vaddr.h"
-#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
-#endif
#include "hw/core/cpu.h"
#include "tcg/debug-assert.h"
#include "exec/page-protection.h"
@@ -40,20 +38,12 @@ int cpu_get_free_index(void);
void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);
-#if !defined(CONFIG_USER_ONLY)
-
enum device_endian {
DEVICE_NATIVE_ENDIAN,
DEVICE_BIG_ENDIAN,
DEVICE_LITTLE_ENDIAN,
};
-#if HOST_BIG_ENDIAN
-#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
-#else
-#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
-#endif
-
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
@@ -95,6 +85,7 @@ void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
+ram_addr_t qemu_ram_get_fd_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
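
The hunk above adds qemu_ram_get_fd_offset() alongside the existing RAMBlock
accessors. As a quick illustration of how these accessors combine with the
qemu_ram_foreach_block() iterator kept further down in this header, here is a
minimal sketch; it is not from the patch, the callback name and the printed
fields are illustrative, and only functions declared in this header are used.

/* Minimal sketch (not from the patch): walk every RAMBlock and print a
 * few of the accessor values this header exposes. */
#include <inttypes.h>
#include <stdio.h>
#include "exec/cpu-common.h"

static int dump_one_block(RAMBlock *rb, void *opaque)
{
    (void)opaque;
    printf("%s: offset=0x%" PRIx64 " used=%" PRIu64 " shared=%d\n",
           qemu_ram_get_idstr(rb),
           (uint64_t)qemu_ram_get_offset(rb),
           (uint64_t)qemu_ram_get_used_length(rb),
           qemu_ram_is_shared(rb));
    return 0; /* returning nonzero stops qemu_ram_foreach_block() early */
}

static void dump_ram_blocks(void)
{
    qemu_ram_foreach_block(dump_one_block, NULL);
}
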
@@ -132,34 +123,22 @@ size_t qemu_ram_pagesize_largest(void);
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr);
/**
- * cpu_address_space_destroy:
- * @cpu: CPU for which address space needs to be destroyed
- * @asidx: integer index of this address space
+ * cpu_destroy_address_spaces:
+ * @cpu: CPU for which address spaces need to be destroyed
*
- * Note that with KVM only one address space is supported.
+ * Destroy all address spaces associated with this CPU; this
+ * is called as part of unrealizing the CPU.
*/
-void cpu_address_space_destroy(CPUState *cpu, int asidx);
+void cpu_destroy_address_spaces(CPUState *cpu);
-void cpu_physical_memory_rw(hwaddr addr, void *buf,
- hwaddr len, bool is_write);
-static inline void cpu_physical_memory_read(hwaddr addr,
- void *buf, hwaddr len)
-{
- cpu_physical_memory_rw(addr, buf, len, false);
-}
-static inline void cpu_physical_memory_write(hwaddr addr,
- const void *buf, hwaddr len)
-{
- cpu_physical_memory_rw(addr, (void *)buf, len, true);
-}
+void cpu_physical_memory_read(hwaddr addr, void *buf, hwaddr len);
+void cpu_physical_memory_write(hwaddr addr, const void *buf, hwaddr len);
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
bool is_write, hwaddr access_len);
-bool cpu_physical_memory_is_io(hwaddr phys_addr);
-
/* Coalesced MMIO regions are areas where write operations can be reordered.
* This usually implies that write operations are side-effect free. This allows
* batching which can make a major impact on performance when using
@@ -167,20 +146,9 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
*/
void qemu_flush_coalesced_mmio_buffer(void);
-void cpu_flush_icache_range(hwaddr start, hwaddr len);
-
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
-int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
-int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
- size_t length);
-
-#endif
-
-/* Returns: 0 on success, -1 on error */
-int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
- void *ptr, size_t len, bool is_write);
/* vl.c */
void list_cpus(void);
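
The hunk before last turns cpu_physical_memory_read()/write() from static
inline wrappers around cpu_physical_memory_rw() into ordinary out-of-line
functions, letting cpu_physical_memory_rw() drop out of the header entirely.
Callers are unchanged at the source level. A minimal caller-side sketch, with
the helper name and the addresses being illustrative:

/* Minimal sketch (not from the patch): copy a 32-bit value between two
 * guest-physical addresses using the now out-of-line accessors. */
#include "exec/cpu-common.h"

static void copy_guest_word(hwaddr src, hwaddr dst)
{
    uint32_t tmp;

    cpu_physical_memory_read(src, &tmp, sizeof(tmp));   /* guest -> host */
    cpu_physical_memory_write(dst, &tmp, sizeof(tmp));  /* host -> guest */
}
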
@@ -194,7 +162,7 @@ void list_cpus(void);
* @host_pc: the host pc within the translation
* @data: output data
*
- * Attempt to load the the unwind state for a host pc occurring in
+ * Attempt to load the unwind state for a host pc occurring in
* translated code. If @host_pc is not in translated code, the
* function returns false; otherwise @data is loaded.
* This is the same unwind info as given to restore_state_to_opc.
@@ -232,9 +200,9 @@ static inline bool cpu_loop_exit_requested(CPUState *cpu)
G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
+G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
#endif /* CONFIG_TCG */
G_NORETURN void cpu_loop_exit(CPUState *cpu);
-G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);
@@ -272,24 +240,4 @@ static inline CPUState *env_cpu(CPUArchState *env)
return (CPUState *)env_cpu_const(env);
}
-#ifndef CONFIG_USER_ONLY
-/**
- * cpu_mmu_index:
- * @env: The cpu environment
- * @ifetch: True for code access, false for data access.
- *
- * Return the core mmu index for the current translation regime.
- * This function is used by generic TCG code paths.
- *
- * The user-only version of this function is inline in cpu-all.h,
- * where it always returns MMU_USER_IDX.
- */
-static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- int ret = cs->cc->mmu_index(cs, ifetch);
- tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
- return ret;
-}
-#endif /* !CONFIG_USER_ONLY */
-
#endif /* CPU_COMMON_H */
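
Finally, the rename of cpu_address_space_destroy() to
cpu_destroy_address_spaces() moves the per-index teardown out of callers and
into the core: unrealize paths now make a single call per CPU instead of
looping over address space indexes. A hedged sketch of the caller-side
effect; the surrounding function and the old loop shape are illustrative,
not taken from the patch.

/* Sketch of the unrealize-side change implied by the rename; the function
 * name and the old loop below are illustrative. */
#include "exec/cpu-common.h"

static void example_cpu_unrealize(CPUState *cpu)
{
    /*
     * Old API, one call per address space index:
     *     for (int i = 0; i < cpu->num_ases; i++) {
     *         cpu_address_space_destroy(cpu, i);
     *     }
     * New API, one call tears down all of them:
     */
    cpu_destroy_address_spaces(cpu);
}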