Diffstat (limited to 'include')
33 files changed, 731 insertions, 248 deletions
diff --git a/include/block/block.h b/include/block/block.h index 107c603..398a050 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -314,17 +314,11 @@ BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, - int64_t offset, int count, - BlockCompletionFunc *cb, void *opaque); void bdrv_aio_cancel(BlockAIOCB *acb); void bdrv_aio_cancel_async(BlockAIOCB *acb); /* sg packet commands */ -int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf); -BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, - unsigned long int req, void *buf, - BlockCompletionFunc *cb, void *opaque); +int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf); /* Invalidate any cached metadata used by image formats */ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp); diff --git a/include/block/block_int.h b/include/block/block_int.h index 3e79228..e96e9ad 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -244,6 +244,8 @@ struct BlockDriver { BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque); + int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs, + unsigned long int req, void *buf); /* List of options for creating images, terminated by name == NULL */ QemuOptsList *create_opts; diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index ee3388f..9dea14b 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -8,6 +8,9 @@ BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, uint32_t granularity, const char *name, Error **errp); +void bdrv_create_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap, + int chunk_size); +void bdrv_release_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap); int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, Error **errp); @@ -27,8 +30,11 @@ void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap); BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs); uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs); uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap); +uint32_t bdrv_dirty_bitmap_meta_granularity(BdrvDirtyBitmap *bitmap); bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap); bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap); +const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap); +int64_t bdrv_dirty_bitmap_size(const BdrvDirtyBitmap *bitmap); DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap); int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector); @@ -36,9 +42,34 @@ void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int64_t nr_sectors); void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int64_t nr_sectors); -void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); -void bdrv_set_dirty_iter(struct HBitmapIter *hbi, int64_t offset); +int bdrv_dirty_bitmap_get_meta(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, int64_t sector, + int nb_sectors); +void bdrv_dirty_bitmap_reset_meta(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, int64_t sector, + int nb_sectors); +BdrvDirtyBitmapIter *bdrv_dirty_meta_iter_new(BdrvDirtyBitmap *bitmap); +BdrvDirtyBitmapIter *bdrv_dirty_iter_new(BdrvDirtyBitmap *bitmap, + uint64_t first_sector); 
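The hunk above replaces the old HBitmapIter-based interface with an opaque BdrvDirtyBitmapIter. Together with bdrv_dirty_iter_next() and bdrv_dirty_iter_free(), declared immediately below, the intended usage pattern looks roughly like the following sketch (the helper name, locking and the loop body are illustrative, not part of the patch):

    /* Illustrative sketch: walk every dirty sector of @bitmap from the start.
     * Locking and error handling are omitted. */
    static void walk_dirty_sectors(BdrvDirtyBitmap *bitmap)
    {
        BdrvDirtyBitmapIter *iter = bdrv_dirty_iter_new(bitmap, 0);
        int64_t sector;

        while ((sector = bdrv_dirty_iter_next(iter)) >= 0) {
            /* handle dirty sector 'sector' */
        }
        bdrv_dirty_iter_free(iter);
    }

bdrv_set_dirty_iter(), further down in the same hunk, repositions an existing iterator instead of allocating a new one.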
+void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter); +int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter); +void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *hbi, int64_t sector_num); int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); +int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap); void bdrv_dirty_bitmap_truncate(BlockDriverState *bs); +uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap, + uint64_t start, uint64_t count); +uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap); +void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap, + uint8_t *buf, uint64_t start, + uint64_t count); +void bdrv_dirty_bitmap_deserialize_part(BdrvDirtyBitmap *bitmap, + uint8_t *buf, uint64_t start, + uint64_t count, bool finish); +void bdrv_dirty_bitmap_deserialize_zeroes(BdrvDirtyBitmap *bitmap, + uint64_t start, uint64_t count, + bool finish); +void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap); + #endif diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index b6a7059..e9004e5 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -31,6 +31,7 @@ #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ #define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ +#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */ /* some important defines: * @@ -189,6 +190,15 @@ void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val, /* page related stuff */ +#ifdef TARGET_PAGE_BITS_VARY +extern bool target_page_bits_decided; +extern int target_page_bits; +#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \ + target_page_bits; }) +#else +#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS +#endif + #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 336a57c..cb624e4 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -57,9 +57,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu, uint32_t flags, int cflags); -void cpu_exec_init(CPUState *cpu, Error **errp); void QEMU_NORETURN cpu_loop_exit(CPUState *cpu); void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); +void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); #if !defined(CONFIG_USER_ONLY) void cpu_reloading_memory_map(void); diff --git a/include/exec/memory.h b/include/exec/memory.h index 10d7eac..79ccaab 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -255,8 +255,9 @@ struct MemoryListener { hwaddr addr, hwaddr len); /* Lower = earlier (during add), later (during del) */ unsigned priority; - AddressSpace *address_space_filter; + AddressSpace *address_space; QTAILQ_ENTRY(MemoryListener) link; + QTAILQ_ENTRY(MemoryListener) link_as; }; /** @@ -278,7 +279,7 @@ struct AddressSpace { struct AddressSpaceDispatch *dispatch; struct AddressSpaceDispatch *next_dispatch; MemoryListener dispatch_listener; - + QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners; QTAILQ_ENTRY(AddressSpace) address_spaces_link; }; diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index 9c1b7cb..d1d1d61 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -343,6 +343,24 @@ struct AcpiMadtLocalNmi { } QEMU_PACKED; typedef struct 
AcpiMadtLocalNmi AcpiMadtLocalNmi; +struct AcpiMadtProcessorX2Apic { + ACPI_SUB_HEADER_DEF + uint16_t reserved; + uint32_t x2apic_id; /* Processor's local x2APIC ID */ + uint32_t flags; + uint32_t uid; /* Processor object _UID */ +} QEMU_PACKED; +typedef struct AcpiMadtProcessorX2Apic AcpiMadtProcessorX2Apic; + +struct AcpiMadtLocalX2ApicNmi { + ACPI_SUB_HEADER_DEF + uint16_t flags; /* MPS INTI flags */ + uint32_t uid; /* Processor object _UID */ + uint8_t lint; /* Local APIC LINT# */ + uint8_t reserved[3]; /* Local APIC LINT# */ +} QEMU_PACKED; +typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi; + struct AcpiMadtGenericInterrupt { ACPI_SUB_HEADER_DEF uint16_t reserved; @@ -485,6 +503,17 @@ struct AcpiSratProcessorAffinity } QEMU_PACKED; typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity; +struct AcpiSratProcessorX2ApicAffinity { + ACPI_SUB_HEADER_DEF + uint16_t reserved; + uint32_t proximity_domain; + uint32_t x2apic_id; + uint32_t flags; + uint32_t clk_domain; + uint32_t reserved2; +} QEMU_PACKED; +typedef struct AcpiSratProcessorX2ApicAffinity AcpiSratProcessorX2ApicAffinity; + struct AcpiSratMemoryAffinity { ACPI_SUB_HEADER_DEF @@ -609,4 +638,72 @@ typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit; /* Masks for Flags field above */ #define ACPI_DMAR_INCLUDE_PCI_ALL 1 +/* + * Input Output Remapping Table (IORT) + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049B, October 2015 + */ + +struct AcpiIortTable { + ACPI_TABLE_HEADER_DEF /* ACPI common table header */ + uint32_t node_count; + uint32_t node_offset; + uint32_t reserved; +} QEMU_PACKED; +typedef struct AcpiIortTable AcpiIortTable; + +/* + * IORT node types + */ + +#define ACPI_IORT_NODE_HEADER_DEF /* Node format common fields */ \ + uint8_t type; \ + uint16_t length; \ + uint8_t revision; \ + uint32_t reserved; \ + uint32_t mapping_count; \ + uint32_t mapping_offset; + +/* Values for node Type above */ +enum { + ACPI_IORT_NODE_ITS_GROUP = 0x00, + ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, + ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, + ACPI_IORT_NODE_SMMU = 0x03, + ACPI_IORT_NODE_SMMU_V3 = 0x04 +}; + +struct AcpiIortIdMapping { + uint32_t input_base; + uint32_t id_count; + uint32_t output_base; + uint32_t output_reference; + uint32_t flags; +} QEMU_PACKED; +typedef struct AcpiIortIdMapping AcpiIortIdMapping; + +struct AcpiIortMemoryAccess { + uint32_t cache_coherency; + uint8_t hints; + uint16_t reserved; + uint8_t memory_flags; +} QEMU_PACKED; +typedef struct AcpiIortMemoryAccess AcpiIortMemoryAccess; + +struct AcpiIortItsGroup { + ACPI_IORT_NODE_HEADER_DEF + uint32_t its_count; + uint32_t identifiers[0]; +} QEMU_PACKED; +typedef struct AcpiIortItsGroup AcpiIortItsGroup; + +struct AcpiIortRC { + ACPI_IORT_NODE_HEADER_DEF + AcpiIortMemoryAccess memory_properties; + uint32_t ats_attribute; + uint32_t pci_segment_number; + AcpiIortIdMapping id_mapping_array[0]; +} QEMU_PACKED; +typedef struct AcpiIortRC AcpiIortRC; + #endif diff --git a/include/hw/boards.h b/include/hw/boards.h index e46a744..a51da9c 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -86,6 +86,12 @@ typedef struct { * Returns a @HotpluggableCPUList, which describes CPUs objects which * could be added with -device/device_add. * Caller is responsible for freeing returned list. 
+ * @minimum_page_bits: + * If non-zero, the board promises never to create a CPU with a page size + * smaller than this, so QEMU can use a more efficient larger page + * size than the target architecture's minimum. (Attempting to create + * such a CPU will fail.) Note that changing this is a migration + * compatibility break for the machine. */ struct MachineClass { /*< private >*/ @@ -124,6 +130,7 @@ struct MachineClass { ram_addr_t default_ram_size; bool option_rom_has_mr; bool rom_file_has_mr; + int minimum_page_bits; HotplugHandler *(*get_hotplug_handler)(MachineState *machine, DeviceState *dev); diff --git a/include/hw/char/bcm2835_aux.h b/include/hw/char/bcm2835_aux.h index 42f0ee7..6865f15 100644 --- a/include/hw/char/bcm2835_aux.h +++ b/include/hw/char/bcm2835_aux.h @@ -22,7 +22,7 @@ typedef struct { /*< public >*/ MemoryRegion iomem; - CharDriverState *chr; + CharBackend chr; qemu_irq irq; uint8_t read_fifo[BCM2835_AUX_RX_FIFO_LEN]; diff --git a/include/hw/char/cadence_uart.h b/include/hw/char/cadence_uart.h index a12773c..ca75eb5 100644 --- a/include/hw/char/cadence_uart.h +++ b/include/hw/char/cadence_uart.h @@ -44,7 +44,7 @@ typedef struct { uint32_t rx_count; uint32_t tx_count; uint64_t char_tx_time; - CharDriverState *chr; + CharBackend chr; qemu_irq irq; QEMUTimer *fifo_trigger_handle; } CadenceUARTState; diff --git a/include/hw/char/digic-uart.h b/include/hw/char/digic-uart.h index 7b3f145..340c8e1 100644 --- a/include/hw/char/digic-uart.h +++ b/include/hw/char/digic-uart.h @@ -19,6 +19,7 @@ #define HW_CHAR_DIGIC_UART_H #include "hw/sysbus.h" +#include "sysemu/char.h" #define TYPE_DIGIC_UART "digic-uart" #define DIGIC_UART(obj) \ @@ -37,7 +38,7 @@ typedef struct DigicUartState { /*< public >*/ MemoryRegion regs_region; - CharDriverState *chr; + CharBackend chr; uint32_t reg_rx; uint32_t reg_st; diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h index 6cd75c0..4cc3fbc 100644 --- a/include/hw/char/imx_serial.h +++ b/include/hw/char/imx_serial.h @@ -19,6 +19,7 @@ #define IMX_SERIAL_H #include "hw/sysbus.h" +#include "sysemu/char.h" #define TYPE_IMX_SERIAL "imx.serial" #define IMX_SERIAL(obj) OBJECT_CHECK(IMXSerialState, (obj), TYPE_IMX_SERIAL) @@ -96,7 +97,7 @@ typedef struct IMXSerialState { uint32_t ucr3; qemu_irq irq; - CharDriverState *chr; + CharBackend chr; } IMXSerialState; #endif diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h index a4fd3d5..c928d7d 100644 --- a/include/hw/char/serial.h +++ b/include/hw/char/serial.h @@ -28,8 +28,10 @@ #include "hw/hw.h" #include "sysemu/sysemu.h" +#include "sysemu/char.h" #include "exec/memory.h" #include "qemu/fifo8.h" +#include "sysemu/char.h" #define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */ @@ -52,7 +54,7 @@ struct SerialState { it can be reset while reading iir */ int thr_ipending; qemu_irq irq; - CharDriverState *chr; + CharBackend chr; int last_break_enable; int it_shift; int baudbase; @@ -94,6 +96,6 @@ SerialState *serial_mm_init(MemoryRegion *address_space, /* serial-isa.c */ #define TYPE_ISA_SERIAL "isa-serial" -void serial_hds_isa_init(ISABus *bus, int n); +void serial_hds_isa_init(ISABus *bus, int from, int to); #endif diff --git a/include/hw/char/stm32f2xx_usart.h b/include/hw/char/stm32f2xx_usart.h index b97f192..3267523 100644 --- a/include/hw/char/stm32f2xx_usart.h +++ b/include/hw/char/stm32f2xx_usart.h @@ -67,7 +67,7 @@ typedef struct { uint32_t usart_cr3; uint32_t usart_gtpr; - CharDriverState *chr; + CharBackend chr; qemu_irq irq; } STM32F2XXUsartState; #endif /* 
HW_STM32F2XX_USART_H */ diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h index cdd11fb..1209eb4 100644 --- a/include/hw/i386/apic_internal.h +++ b/include/hw/i386/apic_internal.h @@ -160,7 +160,8 @@ struct APICCommonState { MemoryRegion io_memory; X86CPU *cpu; uint32_t apicbase; - uint8_t id; + uint8_t id; /* legacy APIC ID */ + uint32_t initial_apic_id; uint8_t version; uint8_t arb_id; uint8_t tpr; diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index b16c448..17fff80 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -37,6 +37,7 @@ /** * PCMachineState: * @acpi_dev: link to ACPI PM device that performs ACPI hotplug handling + * @boot_cpus_le: number of present VCPUs, referenced by 'etc/boot-cpus' fw_cfg */ struct PCMachineState { /*< private >*/ @@ -69,6 +70,7 @@ struct PCMachineState { bool apic_xrupt_override; unsigned apic_id_limit; CPUArchIdList *possible_cpus; + uint16_t boot_cpus_le; /* NUMA information: */ uint64_t numa_nodes; diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h index 26c7fdc..48cccbd 100644 --- a/include/hw/ptimer.h +++ b/include/hw/ptimer.h @@ -35,6 +35,26 @@ */ #define PTIMER_POLICY_DEFAULT 0 +/* Periodic timer counter stays with "0" for a one period before wrapping + * around. */ +#define PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD (1 << 0) + +/* Running periodic timer that has counter = limit = 0 would continuously + * re-trigger every period. */ +#define PTIMER_POLICY_CONTINUOUS_TRIGGER (1 << 1) + +/* Starting to run with/setting counter to "0" won't trigger immediately, + * but after a one period for both oneshot and periodic modes. */ +#define PTIMER_POLICY_NO_IMMEDIATE_TRIGGER (1 << 2) + +/* Starting to run with/setting counter to "0" won't re-load counter + * immediately, but after a one period. */ +#define PTIMER_POLICY_NO_IMMEDIATE_RELOAD (1 << 3) + +/* Make counter value of the running timer represent the actual value and + * not the one less. 
*/ +#define PTIMER_POLICY_NO_COUNTER_ROUND_DOWN (1 << 4) + /* ptimer.c */ typedef struct ptimer_state ptimer_state; typedef void (*ptimer_cb)(void *opaque); diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 2a9d2f9..306bbab 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -146,7 +146,7 @@ extern PropertyInfo qdev_prop_arraylen; DEFINE_PROP(_n, _s, _f, qdev_prop_ptr, void*) #define DEFINE_PROP_CHR(_n, _s, _f) \ - DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharDriverState*) + DEFINE_PROP(_n, _s, _f, qdev_prop_chr, CharBackend) #define DEFINE_PROP_STRING(_n, _s, _f) \ DEFINE_PROP(_n, _s, _f, qdev_prop_string, char*) #define DEFINE_PROP_NETDEV(_n, _s, _f) \ diff --git a/include/hw/timer/arm_mptimer.h b/include/hw/timer/arm_mptimer.h index b34cba0..c46d8d2 100644 --- a/include/hw/timer/arm_mptimer.h +++ b/include/hw/timer/arm_mptimer.h @@ -27,12 +27,9 @@ /* State of a single timer or watchdog block */ typedef struct { - uint32_t count; - uint32_t load; uint32_t control; uint32_t status; - int64_t tick; - QEMUTimer *timer; + struct ptimer_state *timer; qemu_irq irq; MemoryRegion iomem; } TimerBlock; diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h index 71b8eb0..fe9a4c5 100644 --- a/include/qapi/qmp/qdict.h +++ b/include/qapi/qmp/qdict.h @@ -73,6 +73,7 @@ void qdict_flatten(QDict *qdict); void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start); void qdict_array_split(QDict *src, QList **dst); int qdict_array_entries(QDict *src, const char *subqdict); +QObject *qdict_crumple(const QDict *src, Error **errp); void qdict_join(QDict *dest, QDict *src, bool overwrite); diff --git a/include/qapi/qmp-input-visitor.h b/include/qapi/qobject-input-visitor.h index f3ff5f3..cde328d 100644 --- a/include/qapi/qmp-input-visitor.h +++ b/include/qapi/qobject-input-visitor.h @@ -11,20 +11,20 @@ * */ -#ifndef QMP_INPUT_VISITOR_H -#define QMP_INPUT_VISITOR_H +#ifndef QOBJECT_INPUT_VISITOR_H +#define QOBJECT_INPUT_VISITOR_H #include "qapi/visitor.h" #include "qapi/qmp/qobject.h" -typedef struct QmpInputVisitor QmpInputVisitor; +typedef struct QObjectInputVisitor QObjectInputVisitor; /* - * Return a new input visitor that converts QMP to QAPI. + * Return a new input visitor that converts a QObject to a QAPI object. * * Set @strict to reject a parse that doesn't consume all keys of a * dictionary; otherwise excess input is ignored. */ -Visitor *qmp_input_visitor_new(QObject *obj, bool strict); +Visitor *qobject_input_visitor_new(QObject *obj, bool strict); #endif diff --git a/include/qapi/qmp-output-visitor.h b/include/qapi/qobject-output-visitor.h index 040fdda..8241877 100644 --- a/include/qapi/qmp-output-visitor.h +++ b/include/qapi/qobject-output-visitor.h @@ -11,20 +11,20 @@ * */ -#ifndef QMP_OUTPUT_VISITOR_H -#define QMP_OUTPUT_VISITOR_H +#ifndef QOBJECT_OUTPUT_VISITOR_H +#define QOBJECT_OUTPUT_VISITOR_H #include "qapi/visitor.h" #include "qapi/qmp/qobject.h" -typedef struct QmpOutputVisitor QmpOutputVisitor; +typedef struct QObjectOutputVisitor QObjectOutputVisitor; /* - * Create a new QMP output visitor. + * Create a new QObject output visitor. * * If everything else succeeds, pass @result to visit_complete() to * collect the result of the visit. 
*/ -Visitor *qmp_output_visitor_new(QObject **result); +Visitor *qobject_output_visitor_new(QObject **result); #endif diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index 6c77a91..9bb6cba 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -25,14 +25,14 @@ * for doing work at each node of a QAPI graph; it can also be used * for a virtual walk, where there is no actual QAPI C struct. * - * There are four kinds of visitor classes: input visitors (QMP, + * There are four kinds of visitor classes: input visitors (QObject, * string, and QemuOpts) parse an external representation and build - * the corresponding QAPI graph, output visitors (QMP and string) take + * the corresponding QAPI graph, output visitors (QObject and string) take * a completed QAPI graph and generate an external representation, the * dealloc visitor can take a QAPI graph (possibly partially * constructed) and recursively free its resources, and the clone * visitor performs a deep clone of one QAPI object to another. While - * the dealloc and QMP input/output visitors are general, the string, + * the dealloc and QObject input/output visitors are general, the string, * QemuOpts, and clone visitors have some implementation limitations; * see the documentation for each visitor for more details on what it * supports. Also, see visitor-impl.h for the callback contracts diff --git a/include/qemu-common.h b/include/qemu-common.h index 9e8b0bd..1430390 100644 --- a/include/qemu-common.h +++ b/include/qemu-common.h @@ -80,6 +80,19 @@ void tcg_exec_init(unsigned long tb_size); bool tcg_enabled(void); void cpu_exec_init_all(void); +void cpu_exec_step_atomic(CPUState *cpu); + +/** + * set_preferred_target_page_bits: + * @bits: number of bits needed to represent an address within the page + * + * Set the preferred target page size (the actual target page + * size may be smaller than any given CPU's preference). + * Returns true on success, false on failure (which can only happen + * if this is called after the system has already finalized its + * choice of page size and the requested page size is smaller than that). + */ +bool set_preferred_target_page_bits(int bits); /** * Sends a (part of) iovec down a socket, yielding when the socket is full, or diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index c4f6950..878fa07 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -72,16 +72,16 @@ * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends(). */ -#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); }) -#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); }) -#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); }) +#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); }) +#define smp_mb_release() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); }) +#define smp_mb_acquire() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); }) /* Most compilers currently treat consume and acquire the same, but really * no processors except Alpha need a barrier here. Leave it in if * using Thread Sanitizer to avoid warnings, otherwise optimize it away. 
*/ #if defined(__SANITIZE_THREAD__) -#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); }) +#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); }) #elif defined(__alpha__) #define smp_read_barrier_depends() asm volatile("mb":::"memory") #else @@ -99,15 +99,21 @@ * no effect on the generated code but not using the atomic primitives * will get flagged by sanitizers as a violation. */ +#define atomic_read__nocheck(ptr) \ + __atomic_load_n(ptr, __ATOMIC_RELAXED) + #define atomic_read(ptr) \ ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - __atomic_load_n(ptr, __ATOMIC_RELAXED); \ + atomic_read__nocheck(ptr); \ }) +#define atomic_set__nocheck(ptr, i) \ + __atomic_store_n(ptr, i, __ATOMIC_RELAXED) + #define atomic_set(ptr, i) do { \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - __atomic_store_n(ptr, i, __ATOMIC_RELAXED); \ + atomic_set__nocheck(ptr, i); \ } while(0) /* See above: most compilers currently treat consume and acquire the @@ -135,62 +141,43 @@ __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) -/* atomic_mb_read/set semantics map Java volatile variables. They are - * less expensive on some platforms (notably POWER & ARMv7) than fully - * sequentially consistent operations. - * - * As long as they are used as paired operations they are safe to - * use. See docs/atomic.txt for more discussion. - */ - -#if defined(_ARCH_PPC) -#define atomic_mb_read(ptr) \ +#define atomic_load_acquire(ptr) \ ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ typeof_strip_qual(*ptr) _val; \ - __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \ - smp_rmb(); \ + __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \ _val; \ }) -#define atomic_mb_set(ptr, i) do { \ +#define atomic_store_release(ptr, i) do { \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - smp_wmb(); \ - __atomic_store_n(ptr, i, __ATOMIC_RELAXED); \ - smp_mb(); \ + __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ } while(0) -#else -#define atomic_mb_read(ptr) \ - ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - typeof_strip_qual(*ptr) _val; \ - __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \ - _val; \ - }) - -#define atomic_mb_set(ptr, i) do { \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - __atomic_store_n(ptr, i, __ATOMIC_SEQ_CST); \ -} while(0) -#endif /* All the remaining operations are fully sequentially consistent */ +#define atomic_xchg__nocheck(ptr, i) ({ \ + __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \ +}) + #define atomic_xchg(ptr, i) ({ \ QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ - __atomic_exchange_n(ptr, i, __ATOMIC_SEQ_CST); \ + atomic_xchg__nocheck(ptr, i); \ }) /* Returns the eventual value, failed or not */ -#define atomic_cmpxchg(ptr, old, new) \ - ({ \ - QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ +#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \ typeof_strip_qual(*ptr) _old = (old); \ __atomic_compare_exchange_n(ptr, &_old, new, false, \ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ _old; \ - }) +}) + +#define atomic_cmpxchg(ptr, old, new) ({ \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ + atomic_cmpxchg__nocheck(ptr, old, new); \ +}) /* Provide shorter names for GCC atomic builtins, return old value */ #define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) @@ -199,6 +186,15 @@ #define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) #define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) #define 
atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) + +#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) +#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) +#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) /* And even shorter names that return void. */ #define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) @@ -207,6 +203,7 @@ #define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) #define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) +#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) #else /* __ATOMIC_RELAXED */ @@ -238,8 +235,8 @@ * here (a compiler barrier only). QEMU doesn't do accesses to write-combining * qemu memory or non-temporal load/stores from C code. */ -#define smp_wmb() barrier() -#define smp_rmb() barrier() +#define smp_mb_release() barrier() +#define smp_mb_acquire() barrier() /* * __sync_lock_test_and_set() is documented to be an acquire barrier only, @@ -248,11 +245,6 @@ */ #define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) -/* - * Load/store with Java volatile semantics. - */ -#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) - #elif defined(_ARCH_PPC) /* @@ -263,13 +255,15 @@ * smp_mb has the same problem as on x86 for not-very-new GCC * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011). */ -#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) +#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) #if defined(__powerpc64__) -#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) +#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) +#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) #else -#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; }) #endif -#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) #endif /* _ARCH_PPC */ @@ -277,18 +271,18 @@ * For (host) platforms we don't have explicit barrier definitions * for, we use the gcc __sync_synchronize() primitive to generate a * full barrier. This should be safe on all platforms, though it may - * be overkill for smp_wmb() and smp_rmb(). + * be overkill for smp_mb_acquire() and smp_mb_release(). 
*/ #ifndef smp_mb -#define smp_mb() __sync_synchronize() +#define smp_mb() __sync_synchronize() #endif -#ifndef smp_wmb -#define smp_wmb() __sync_synchronize() +#ifndef smp_mb_acquire +#define smp_mb_acquire() __sync_synchronize() #endif -#ifndef smp_rmb -#define smp_rmb() __sync_synchronize() +#ifndef smp_mb_release +#define smp_mb_release() __sync_synchronize() #endif #ifndef smp_read_barrier_depends @@ -298,8 +292,11 @@ /* These will only be atomic if the processor does the fetch or store * in a single issue memory operation */ -#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr)) -#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i)) +#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p)) +#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i)) + +#define atomic_read(ptr) atomic_read__nocheck(ptr) +#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i) /** * atomic_rcu_read - reads a RCU-protected pointer to a local variable @@ -341,41 +338,16 @@ atomic_set(ptr, i); \ } while (0) -/* These have the same semantics as Java volatile variables. - * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html: - * "1. Issue a StoreStore barrier (wmb) before each volatile store." - * 2. Issue a StoreLoad barrier after each volatile store. - * Note that you could instead issue one before each volatile load, but - * this would be slower for typical programs using volatiles in which - * reads greatly outnumber writes. Alternatively, if available, you - * can implement volatile store as an atomic instruction (for example - * XCHG on x86) and omit the barrier. This may be more efficient if - * atomic instructions are cheaper than StoreLoad barriers. - * 3. Issue LoadLoad and LoadStore barriers after each volatile load." - * - * If you prefer to think in terms of "pairing" of memory barriers, - * an atomic_mb_read pairs with an atomic_mb_set. - * - * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq, - * while an atomic_mb_set is a st.rel followed by a memory barrier. - * - * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST - * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough. - * Just always use the barriers manually by the rules above. - */ -#define atomic_mb_read(ptr) ({ \ +#define atomic_load_acquire(ptr) ({ \ typeof(*ptr) _val = atomic_read(ptr); \ - smp_rmb(); \ + smp_mb_acquire(); \ _val; \ }) -#ifndef atomic_mb_set -#define atomic_mb_set(ptr, i) do { \ - smp_wmb(); \ +#define atomic_store_release(ptr, i) do { \ + smp_mb_release(); \ atomic_set(ptr, i); \ - smp_mb(); \ } while (0) -#endif #ifndef atomic_xchg #if defined(__clang__) @@ -385,15 +357,27 @@ #define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) #endif #endif +#define atomic_xchg__nocheck atomic_xchg /* Provide shorter names for GCC atomic builtins. 
*/ #define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) #define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) -#define atomic_fetch_add __sync_fetch_and_add -#define atomic_fetch_sub __sync_fetch_and_sub -#define atomic_fetch_and __sync_fetch_and_and -#define atomic_fetch_or __sync_fetch_and_or -#define atomic_cmpxchg __sync_val_compare_and_swap +#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n) +#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n) +#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n) +#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n) +#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n) + +#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) +#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) +#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) +#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) +#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) +#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n) +#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n) + +#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new) +#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) /* And even shorter names that return void. */ #define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) @@ -402,6 +386,42 @@ #define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) #define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) #define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) +#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n)) #endif /* __ATOMIC_RELAXED */ + +#ifndef smp_wmb +#define smp_wmb() smp_mb_release() +#endif +#ifndef smp_rmb +#define smp_rmb() smp_mb_acquire() +#endif + +/* This is more efficient than a store plus a fence. */ +#if !defined(__SANITIZE_THREAD__) +#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) +#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) +#endif +#endif + +/* atomic_mb_read/set semantics map Java volatile variables. They are + * less expensive on some platforms (notably POWER) than fully + * sequentially consistent operations. + * + * As long as they are used as paired operations they are safe to + * use. See docs/atomic.txt for more discussion. + */ + +#ifndef atomic_mb_read +#define atomic_mb_read(ptr) \ + atomic_load_acquire(ptr) +#endif + +#ifndef atomic_mb_set +#define atomic_mb_set(ptr, i) do { \ + atomic_store_release(ptr, i); \ + smp_mb(); \ +} while(0) +#endif + #endif /* QEMU_ATOMIC_H */ diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h index 8ab721e..eb46475 100644 --- a/include/qemu/hbitmap.h +++ b/include/qemu/hbitmap.h @@ -146,6 +146,85 @@ void hbitmap_reset_all(HBitmap *hb); bool hbitmap_get(const HBitmap *hb, uint64_t item); /** + * hbitmap_serialization_granularity: + * @hb: HBitmap to operate on. + * + * Granularity of serialization chunks, used by other serialization functions. + * For every chunk: + * 1. Chunk start should be aligned to this granularity. + * 2. Chunk size should be aligned too, except for last chunk (for which + * start + count == hb->size) + */ +uint64_t hbitmap_serialization_granularity(const HBitmap *hb); + +/** + * hbitmap_serialization_size: + * @hb: HBitmap to operate on. 
+ * @start: Starting bit + * @count: Number of bits + * + * Return number of bytes hbitmap_(de)serialize_part needs + */ +uint64_t hbitmap_serialization_size(const HBitmap *hb, + uint64_t start, uint64_t count); + +/** + * hbitmap_serialize_part + * @hb: HBitmap to operate on. + * @buf: Buffer to store serialized bitmap. + * @start: First bit to store. + * @count: Number of bits to store. + * + * Stores HBitmap data corresponding to given region. The format of saved data + * is linear sequence of bits, so it can be used by hbitmap_deserialize_part + * independently of endianness and size of HBitmap level array elements + */ +void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf, + uint64_t start, uint64_t count); + +/** + * hbitmap_deserialize_part + * @hb: HBitmap to operate on. + * @buf: Buffer to restore bitmap data from. + * @start: First bit to restore. + * @count: Number of bits to restore. + * @finish: Whether to call hbitmap_deserialize_finish automatically. + * + * Restores HBitmap data corresponding to given region. The format is the same + * as for hbitmap_serialize_part. + * + * If @finish is false, caller must call hbitmap_serialize_finish before using + * the bitmap. + */ +void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf, + uint64_t start, uint64_t count, + bool finish); + +/** + * hbitmap_deserialize_zeroes + * @hb: HBitmap to operate on. + * @start: First bit to restore. + * @count: Number of bits to restore. + * @finish: Whether to call hbitmap_deserialize_finish automatically. + * + * Fills the bitmap with zeroes. + * + * If @finish is false, caller must call hbitmap_serialize_finish before using + * the bitmap. + */ +void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count, + bool finish); + +/** + * hbitmap_deserialize_finish + * @hb: HBitmap to operate on. + * + * Repair HBitmap after calling hbitmap_deserialize_data. Actually, all HBitmap + * layers are restored here. + */ +void hbitmap_deserialize_finish(HBitmap *hb); + +/** * hbitmap_free: * @hb: HBitmap to operate on. * @@ -178,6 +257,27 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); */ unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi); +/* hbitmap_create_meta: + * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap. + * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to + * free it. + * + * Currently, we only guarantee that if a bit in the hbitmap is changed it + * will be reflected in the meta bitmap, but we do not yet guarantee the + * opposite. + * + * @hb: The HBitmap to operate on. + * @chunk_size: How many bits in @hb does one bit in the meta track. + */ +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size); + +/* hbitmap_free_meta: + * Free the meta bitmap of @hb. + * + * @hb: The HBitmap whose meta bitmap should be freed. + */ +void hbitmap_free_meta(HBitmap *hb); + /** * hbitmap_iter_next: * @hbi: HBitmapIter to operate on. 
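The hbitmap serialization helpers documented above are meant to be used in pairs: hbitmap_serialization_size() tells the caller how large a buffer a chunk needs, hbitmap_serialize_part() fills it, and hbitmap_deserialize_part() restores it, optionally deferring the level repair to hbitmap_deserialize_finish(). A minimal sketch, assuming a hypothetical copy_chunk() helper and glib allocation as used elsewhere in QEMU:

    /* Sketch only: copy the bit state of one aligned chunk from @src to @dst.
     * Both bitmaps are assumed to have the same size and granularity, and
     * @start/@count are assumed to respect hbitmap_serialization_granularity(). */
    static void copy_chunk(const HBitmap *src, HBitmap *dst,
                           uint64_t start, uint64_t count)
    {
        uint64_t len = hbitmap_serialization_size(src, start, count);
        uint8_t *buf = g_malloc(len);

        hbitmap_serialize_part(src, buf, start, count);
        /* finish=true repairs the HBitmap levels right away; pass false when
         * restoring many chunks and call hbitmap_deserialize_finish() once
         * at the end instead. */
        hbitmap_deserialize_part(dst, buf, start, count, true);
        g_free(buf);
    }

The bdrv_dirty_bitmap_serialize_*/deserialize_* prototypes added in dirty-bitmap.h earlier in this diff follow the same start/count/finish pattern.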
diff --git a/include/qemu/int128.h b/include/qemu/int128.h index c598881..5c9890d 100644 --- a/include/qemu/int128.h +++ b/include/qemu/int128.h @@ -1,6 +1,149 @@ #ifndef INT128_H #define INT128_H +#ifdef CONFIG_INT128 +#include "qemu/bswap.h" + +typedef __int128_t Int128; + +static inline Int128 int128_make64(uint64_t a) +{ + return a; +} + +static inline Int128 int128_make128(uint64_t lo, uint64_t hi) +{ + return (__uint128_t)hi << 64 | lo; +} + +static inline uint64_t int128_get64(Int128 a) +{ + uint64_t r = a; + assert(r == a); + return r; +} + +static inline uint64_t int128_getlo(Int128 a) +{ + return a; +} + +static inline int64_t int128_gethi(Int128 a) +{ + return a >> 64; +} + +static inline Int128 int128_zero(void) +{ + return 0; +} + +static inline Int128 int128_one(void) +{ + return 1; +} + +static inline Int128 int128_2_64(void) +{ + return (Int128)1 << 64; +} + +static inline Int128 int128_exts64(int64_t a) +{ + return a; +} + +static inline Int128 int128_and(Int128 a, Int128 b) +{ + return a & b; +} + +static inline Int128 int128_rshift(Int128 a, int n) +{ + return a >> n; +} + +static inline Int128 int128_add(Int128 a, Int128 b) +{ + return a + b; +} + +static inline Int128 int128_neg(Int128 a) +{ + return -a; +} + +static inline Int128 int128_sub(Int128 a, Int128 b) +{ + return a - b; +} + +static inline bool int128_nonneg(Int128 a) +{ + return a >= 0; +} + +static inline bool int128_eq(Int128 a, Int128 b) +{ + return a == b; +} + +static inline bool int128_ne(Int128 a, Int128 b) +{ + return a != b; +} + +static inline bool int128_ge(Int128 a, Int128 b) +{ + return a >= b; +} + +static inline bool int128_lt(Int128 a, Int128 b) +{ + return a < b; +} + +static inline bool int128_le(Int128 a, Int128 b) +{ + return a <= b; +} + +static inline bool int128_gt(Int128 a, Int128 b) +{ + return a > b; +} + +static inline bool int128_nz(Int128 a) +{ + return a != 0; +} + +static inline Int128 int128_min(Int128 a, Int128 b) +{ + return a < b ? a : b; +} + +static inline Int128 int128_max(Int128 a, Int128 b) +{ + return a > b ? a : b; +} + +static inline void int128_addto(Int128 *a, Int128 b) +{ + *a += b; +} + +static inline void int128_subfrom(Int128 *a, Int128 b) +{ + *a -= b; +} + +static inline Int128 bswap128(Int128 a) +{ + return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a))); +} + +#else /* !CONFIG_INT128 */ typedef struct Int128 Int128; @@ -14,12 +157,27 @@ static inline Int128 int128_make64(uint64_t a) return (Int128) { a, 0 }; } +static inline Int128 int128_make128(uint64_t lo, uint64_t hi) +{ + return (Int128) { lo, hi }; +} + static inline uint64_t int128_get64(Int128 a) { assert(!a.hi); return a.lo; } +static inline uint64_t int128_getlo(Int128 a) +{ + return a.lo; +} + +static inline int64_t int128_gethi(Int128 a) +{ + return a.hi; +} + static inline Int128 int128_zero(void) { return int128_make64(0); @@ -53,9 +211,9 @@ static inline Int128 int128_rshift(Int128 a, int n) } h = a.hi >> (n & 63); if (n >= 64) { - return (Int128) { h, h >> 63 }; + return int128_make128(h, h >> 63); } else { - return (Int128) { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h }; + return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h); } } @@ -69,18 +227,18 @@ static inline Int128 int128_add(Int128 a, Int128 b) * * So the carry is lo < a.lo. 
*/ - return (Int128) { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) }; + return int128_make128(lo, (uint64_t)a.hi + b.hi + (lo < a.lo)); } static inline Int128 int128_neg(Int128 a) { uint64_t lo = -a.lo; - return (Int128) { lo, ~(uint64_t)a.hi + !lo }; + return int128_make128(lo, ~(uint64_t)a.hi + !lo); } static inline Int128 int128_sub(Int128 a, Int128 b) { - return (Int128){ a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) }; + return int128_make128(a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo)); } static inline bool int128_nonneg(Int128 a) @@ -143,4 +301,5 @@ static inline void int128_subfrom(Int128 *a, Int128 b) *a = int128_sub(*a, b); } -#endif +#endif /* CONFIG_INT128 */ +#endif /* INT128_H */ diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index b113fcf..1b8c30a 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -11,6 +11,7 @@ typedef struct AioContext AioContext; typedef struct AllwinnerAHCIState AllwinnerAHCIState; typedef struct AudioState AudioState; typedef struct BdrvDirtyBitmap BdrvDirtyBitmap; +typedef struct BdrvDirtyBitmapIter BdrvDirtyBitmapIter; typedef struct BlockBackend BlockBackend; typedef struct BlockBackendRootState BlockBackendRootState; typedef struct BlockDriverState BlockDriverState; diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 6d481a1..633c3fc 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -946,7 +946,9 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx); void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) GCC_FMT_ATTR(2, 3); -void cpu_exec_exit(CPUState *cpu); +void cpu_exec_initfn(CPUState *cpu); +void cpu_exec_realizefn(CPUState *cpu, Error **errp); +void cpu_exec_unrealizefn(CPUState *cpu); #ifdef CONFIG_SOFTMMU extern const struct VMStateDescription vmstate_cpu_common; diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index b07159b..6444e41 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -146,6 +146,7 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count, BlockCompletionFunc *cb, void *opaque); void blk_aio_cancel(BlockAIOCB *acb); void blk_aio_cancel_async(BlockAIOCB *acb); +int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf); int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf); BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque); diff --git a/include/sysemu/char.h b/include/sysemu/char.h index 19dad3f..0a14942 100644 --- a/include/sysemu/char.h +++ b/include/sysemu/char.h @@ -13,12 +13,13 @@ /* character device */ -#define CHR_EVENT_BREAK 0 /* serial break char */ -#define CHR_EVENT_FOCUS 1 /* focus to this terminal (modal input needed) */ -#define CHR_EVENT_OPENED 2 /* new connection established */ -#define CHR_EVENT_MUX_IN 3 /* mux-focus was set to this terminal */ -#define CHR_EVENT_MUX_OUT 4 /* mux-focus will move on */ -#define CHR_EVENT_CLOSED 5 /* connection closed */ +typedef enum { + CHR_EVENT_BREAK, /* serial break char */ + CHR_EVENT_OPENED, /* new connection established */ + CHR_EVENT_MUX_IN, /* mux-focus was set to this terminal */ + CHR_EVENT_MUX_OUT, /* mux-focus will move on */ + CHR_EVENT_CLOSED /* connection closed */ +} QEMUChrEvent; #define CHR_IOCTL_SERIAL_SET_PARAMS 1 @@ -72,10 +73,20 @@ typedef enum { QEMU_CHAR_FEATURE_LAST, } CharDriverFeature; +/* This is the backend as seen by frontend, the actual backend is + * CharDriverState */ +typedef struct CharBackend { 
+ CharDriverState *chr; + IOEventHandler *chr_event; + IOCanReadHandler *chr_can_read; + IOReadHandler *chr_read; + void *opaque; + int tag; + int fe_open; +} CharBackend; struct CharDriverState { QemuMutex chr_write_lock; - void (*init)(struct CharDriverState *s); int (*chr_write)(struct CharDriverState *s, const uint8_t *buf, int len); int (*chr_sync_read)(struct CharDriverState *s, const uint8_t *buf, int len); @@ -87,25 +98,17 @@ struct CharDriverState { int (*set_msgfds)(struct CharDriverState *s, int *fds, int num); int (*chr_add_client)(struct CharDriverState *chr, int fd); int (*chr_wait_connected)(struct CharDriverState *chr, Error **errp); - IOEventHandler *chr_event; - IOCanReadHandler *chr_can_read; - IOReadHandler *chr_read; - void *handler_opaque; - void (*chr_close)(struct CharDriverState *chr); + void (*chr_free)(struct CharDriverState *chr); void (*chr_disconnect)(struct CharDriverState *chr); void (*chr_accept_input)(struct CharDriverState *chr); void (*chr_set_echo)(struct CharDriverState *chr, bool echo); void (*chr_set_fe_open)(struct CharDriverState *chr, int fe_open); - void (*chr_fe_event)(struct CharDriverState *chr, int event); + CharBackend *be; void *opaque; char *label; char *filename; int logfd; int be_open; - int fe_open; - int explicit_fe_open; - int explicit_be_open; - int avail_connections; int is_mux; guint fd_in_tag; bool replay; @@ -130,13 +133,11 @@ CharDriverState *qemu_chr_alloc(ChardevCommon *backend, Error **errp); * Create a new character backend from a QemuOpts list. * * @opts see qemu-config.c for a list of valid options - * @init not sure.. * * Returns: a new character backend */ CharDriverState *qemu_chr_new_from_opts(QemuOpts *opts, - void (*init)(struct CharDriverState *s), - Error **errp); + Error **errp); /** * @qemu_chr_parse_common: @@ -155,18 +156,19 @@ void qemu_chr_parse_common(QemuOpts *opts, ChardevCommon *backend); * * @label the name of the backend * @filename the URI - * @init not sure.. * * Returns: a new character backend */ -CharDriverState *qemu_chr_new(const char *label, const char *filename, - void (*init)(struct CharDriverState *s)); +CharDriverState *qemu_chr_new(const char *label, const char *filename); + + /** - * @qemu_chr_disconnect: + * @qemu_chr_fe_disconnect: * * Close a fd accpeted by character backend. + * Without associated CharDriver, do nothing. */ -void qemu_chr_disconnect(CharDriverState *chr); +void qemu_chr_fe_disconnect(CharBackend *be); /** * @qemu_chr_cleanup: @@ -176,11 +178,12 @@ void qemu_chr_disconnect(CharDriverState *chr); void qemu_chr_cleanup(void); /** - * @qemu_chr_wait_connected: + * @qemu_chr_fe_wait_connected: * - * Wait for characted backend to be connected. + * Wait for characted backend to be connected, return < 0 on error or + * if no assicated CharDriver. */ -int qemu_chr_wait_connected(CharDriverState *chr, Error **errp); +int qemu_chr_fe_wait_connected(CharBackend *be, Error **errp); /** * @qemu_chr_new_noreplay: @@ -191,12 +194,10 @@ int qemu_chr_wait_connected(CharDriverState *chr, Error **errp); * * @label the name of the backend * @filename the URI - * @init not sure.. * * Returns: a new character backend */ -CharDriverState *qemu_chr_new_noreplay(const char *label, const char *filename, - void (*init)(struct CharDriverState *s)); +CharDriverState *qemu_chr_new_noreplay(const char *label, const char *filename); /** * @qemu_chr_delete: @@ -219,37 +220,31 @@ void qemu_chr_free(CharDriverState *chr); * Ask the backend to override its normal echo setting. 
This only really * applies to the stdio backend and is used by the QMP server such that you * can see what you type if you try to type QMP commands. + * Without associated CharDriver, do nothing. * * @echo true to enable echo, false to disable echo */ -void qemu_chr_fe_set_echo(struct CharDriverState *chr, bool echo); +void qemu_chr_fe_set_echo(CharBackend *be, bool echo); /** * @qemu_chr_fe_set_open: * * Set character frontend open status. This is an indication that the * front end is ready (or not) to begin doing I/O. + * Without associated CharDriver, do nothing. */ -void qemu_chr_fe_set_open(struct CharDriverState *chr, int fe_open); - -/** - * @qemu_chr_fe_event: - * - * Send an event from the front end to the back end. - * - * @event the event to send - */ -void qemu_chr_fe_event(CharDriverState *s, int event); +void qemu_chr_fe_set_open(CharBackend *be, int fe_open); /** * @qemu_chr_fe_printf: * - * Write to a character backend using a printf style interface. - * This function is thread-safe. + * Write to a character backend using a printf style interface. This + * function is thread-safe. It does nothing without associated + * CharDriver. * * @fmt see #printf */ -void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...) +void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...) GCC_FMT_ATTR(2, 3); /** @@ -258,13 +253,13 @@ void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...) * If the backend is connected, create and add a #GSource that fires * when the given condition (typically G_IO_OUT|G_IO_HUP or G_IO_HUP) * is active; return the #GSource's tag. If it is disconnected, - * return 0. + * or without associated CharDriver, return 0. * * @cond the condition to poll for * @func the function to call when the condition happens * @user_data the opaque pointer to pass to @func */ -guint qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond, +guint qemu_chr_fe_add_watch(CharBackend *be, GIOCondition cond, GIOFunc func, void *user_data); /** @@ -277,9 +272,9 @@ guint qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond, * @buf the data * @len the number of bytes to send * - * Returns: the number of bytes consumed + * Returns: the number of bytes consumed (0 if no assicated CharDriver) */ -int qemu_chr_fe_write(CharDriverState *s, const uint8_t *buf, int len); +int qemu_chr_fe_write(CharBackend *be, const uint8_t *buf, int len); /** * @qemu_chr_fe_write_all: @@ -292,9 +287,9 @@ int qemu_chr_fe_write(CharDriverState *s, const uint8_t *buf, int len); * @buf the data * @len the number of bytes to send * - * Returns: the number of bytes consumed + * Returns: the number of bytes consumed (0 if no assicated CharDriver) */ -int qemu_chr_fe_write_all(CharDriverState *s, const uint8_t *buf, int len); +int qemu_chr_fe_write_all(CharBackend *be, const uint8_t *buf, int len); /** * @qemu_chr_fe_read_all: @@ -304,9 +299,9 @@ int qemu_chr_fe_write_all(CharDriverState *s, const uint8_t *buf, int len); * @buf the data buffer * @len the number of bytes to read * - * Returns: the number of bytes read + * Returns: the number of bytes read (0 if no assicated CharDriver) */ -int qemu_chr_fe_read_all(CharDriverState *s, uint8_t *buf, int len); +int qemu_chr_fe_read_all(CharBackend *be, uint8_t *buf, int len); /** * @qemu_chr_fe_ioctl: @@ -316,10 +311,11 @@ int qemu_chr_fe_read_all(CharDriverState *s, uint8_t *buf, int len); * @cmd see CHR_IOCTL_* * @arg the data associated with @cmd * - * Returns: if @cmd is not supported by the backend, -ENOTSUP, otherwise the - * return 
value depends on the semantics of @cmd + * Returns: if @cmd is not supported by the backend or there is no + * associated CharDriver, -ENOTSUP, otherwise the return + * value depends on the semantics of @cmd */ -int qemu_chr_fe_ioctl(CharDriverState *s, int cmd, void *arg); +int qemu_chr_fe_ioctl(CharBackend *be, int cmd, void *arg); /** * @qemu_chr_fe_get_msgfd: @@ -332,7 +328,7 @@ int qemu_chr_fe_ioctl(CharDriverState *s, int cmd, void *arg); * this function will return -1 until a client sends a new file * descriptor. */ -int qemu_chr_fe_get_msgfd(CharDriverState *s); +int qemu_chr_fe_get_msgfd(CharBackend *be); /** * @qemu_chr_fe_get_msgfds: @@ -345,7 +341,7 @@ int qemu_chr_fe_get_msgfd(CharDriverState *s); * this function will return -1 until a client sends a new set of file * descriptors. */ -int qemu_chr_fe_get_msgfds(CharDriverState *s, int *fds, int num); +int qemu_chr_fe_get_msgfds(CharBackend *be, int *fds, int num); /** * @qemu_chr_fe_set_msgfds: @@ -356,38 +352,9 @@ int qemu_chr_fe_get_msgfds(CharDriverState *s, int *fds, int num); * result in overwriting the fd array with the new value without being send. * Upon writing the message the fd array is freed. * - * Returns: -1 if fd passing isn't supported. - */ -int qemu_chr_fe_set_msgfds(CharDriverState *s, int *fds, int num); - -/** - * @qemu_chr_fe_claim: - * - * Claim a backend before using it, should be called before calling - * qemu_chr_add_handlers(). - * - * Returns: -1 if the backend is already in use by another frontend, 0 on - * success. - */ -int qemu_chr_fe_claim(CharDriverState *s); - -/** - * @qemu_chr_fe_claim_no_fail: - * - * Like qemu_chr_fe_claim, but will exit qemu with an error when the - * backend is already in use. + * Returns: -1 if fd passing isn't supported or no associated CharDriver. */ -void qemu_chr_fe_claim_no_fail(CharDriverState *s); - -/** - * @qemu_chr_fe_claim: - * - * Release a backend for use by another frontend. - * - * Returns: -1 if the backend is already in use by another frontend, 0 on - * success. - */ -void qemu_chr_fe_release(CharDriverState *s); +int qemu_chr_fe_set_msgfds(CharBackend *be, int *fds, int num); /** * @qemu_chr_be_can_write: @@ -432,22 +399,70 @@ void qemu_chr_be_write_impl(CharDriverState *s, uint8_t *buf, int len); */ void qemu_chr_be_event(CharDriverState *s, int event); -void qemu_chr_add_handlers(CharDriverState *s, - IOCanReadHandler *fd_can_read, - IOReadHandler *fd_read, - IOEventHandler *fd_event, - void *opaque); +/** + * @qemu_chr_fe_init: + * + * Initializes a front end for the given CharBackend and + * CharDriver. Call qemu_chr_fe_deinit() to remove the association and + * release the driver. + * + * Returns: false on error. + */ +bool qemu_chr_fe_init(CharBackend *b, CharDriverState *s, Error **errp); + +/** + * @qemu_chr_fe_get_driver: + * + * Returns the driver associated with a CharBackend or NULL if no + * associated CharDriver. + */ +CharDriverState *qemu_chr_fe_get_driver(CharBackend *be); + +/** + * @qemu_chr_fe_deinit: + * + * Dissociate the CharBackend from the CharDriver. + * + * Safe to call without associated CharDriver. + */ +void qemu_chr_fe_deinit(CharBackend *b); -/* This API can make handler run in the context what you pass to. 
*/ -void qemu_chr_add_handlers_full(CharDriverState *s, - IOCanReadHandler *fd_can_read, - IOReadHandler *fd_read, - IOEventHandler *fd_event, - void *opaque, - GMainContext *context); +/** + * @qemu_chr_fe_set_handlers: + * @b: a CharBackend + * @fd_can_read: callback to get the amount of data the frontend may + * receive + * @fd_read: callback to receive data from char + * @fd_event: event callback + * @opaque: an opaque pointer for the callbacks + * @context: a main loop context or NULL for the default + * @set_open: whether to call qemu_chr_fe_set_open() implicitely when + * any of the handler is non-NULL + * + * Set the front end char handlers. The front end takes the focus if + * any of the handler is non-NULL. + * + * Without associated CharDriver, nothing is changed. + */ +void qemu_chr_fe_set_handlers(CharBackend *b, + IOCanReadHandler *fd_can_read, + IOReadHandler *fd_read, + IOEventHandler *fd_event, + void *opaque, + GMainContext *context, + bool set_open); + +/** + * @qemu_chr_fe_take_focus: + * + * Take the focus (if the front end is muxed). + * + * Without associated CharDriver, nothing is changed. + */ +void qemu_chr_fe_take_focus(CharBackend *b); void qemu_chr_be_generic_open(CharDriverState *s); -void qemu_chr_accept_input(CharDriverState *s); +void qemu_chr_fe_accept_input(CharBackend *be); int qemu_chr_add_client(CharDriverState *s, int fd); CharDriverState *qemu_chr_find(const char *name); bool chr_is_ringbuf(const CharDriverState *chr); @@ -458,10 +473,15 @@ void qemu_chr_set_feature(CharDriverState *chr, CharDriverFeature feature); QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename); +typedef void CharDriverParse(QemuOpts *opts, ChardevBackend *backend, + Error **errp); +typedef CharDriverState *CharDriverCreate(const char *id, + ChardevBackend *backend, + ChardevReturn *ret, bool *be_opened, + Error **errp); + void register_char_driver(const char *name, ChardevBackendKind kind, - void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp), - CharDriverState *(*create)(const char *id, ChardevBackend *backend, - ChardevReturn *ret, Error **errp)); + CharDriverParse *parse, CharDriverCreate *create); extern int term_escape_char; diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h index 34c8eaf..c228c66 100644 --- a/include/sysemu/dma.h +++ b/include/sysemu/dma.h @@ -199,14 +199,14 @@ typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov, void *opaque); BlockAIOCB *dma_blk_io(AioContext *ctx, - QEMUSGList *sg, uint64_t offset, + QEMUSGList *sg, uint64_t offset, uint32_t align, DMAIOFunc *io_func, void *io_func_opaque, BlockCompletionFunc *cb, void *opaque, DMADirection dir); BlockAIOCB *dma_blk_read(BlockBackend *blk, - QEMUSGList *sg, uint64_t offset, + QEMUSGList *sg, uint64_t offset, uint32_t align, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *dma_blk_write(BlockBackend *blk, - QEMUSGList *sg, uint64_t offset, + QEMUSGList *sg, uint64_t offset, uint32_t align, BlockCompletionFunc *cb, void *opaque); uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg); uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg); diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index b668833..66c6f15 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -173,7 +173,7 @@ extern int mem_prealloc; * * Note that cpu->get_arch_id() may be larger than MAX_CPUMASK_BITS. */ -#define MAX_CPUMASK_BITS 255 +#define MAX_CPUMASK_BITS 288 #define MAX_OPTION_ROMS 16 typedef struct QEMUOptionRom { |
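Taken together, the char layer changes replace qemu_chr_add_handlers() on a CharDriverState with a per-frontend CharBackend that is first bound to a driver and then given its callbacks. A rough sketch of how a device front end would now be wired up; the device struct and callback names are invented for illustration, only the qemu_chr_fe_* calls come from this patch:

    typedef struct MyDeviceState {
        CharBackend chr;          /* embedded front end, no longer a pointer */
    } MyDeviceState;

    static int my_can_read(void *opaque) { return 1; }
    static void my_read(void *opaque, const uint8_t *buf, int size) { /* consume */ }
    static void my_event(void *opaque, int event) { /* e.g. CHR_EVENT_OPENED */ }

    static void my_device_realize(MyDeviceState *s, CharDriverState *drv,
                                  Error **errp)
    {
        if (!qemu_chr_fe_init(&s->chr, drv, errp)) {
            return;
        }
        qemu_chr_fe_set_handlers(&s->chr, my_can_read, my_read, my_event,
                                 s, NULL /* default GMainContext */, true);
    }

Because the CharBackend is embedded in the device state and bound with qemu_chr_fe_init(), the claim/release calls and the avail_connections bookkeeping removed by this patch are no longer needed.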