Diffstat (limited to 'system')
-rw-r--r--   system/main.c                   13
-rw-r--r--   system/memory.c                 54
-rw-r--r--   system/meson.build               3
-rw-r--r--   system/physmem.c                23
-rw-r--r--   system/ram-block-attributes.c  444
-rw-r--r--   system/runstate.c               65
-rw-r--r--   system/trace-events              3
-rw-r--r--   system/vl.c                      5
8 files changed, 568 insertions, 42 deletions
diff --git a/system/main.c b/system/main.c
index 1c02206..b8f7157 100644
--- a/system/main.c
+++ b/system/main.c
@@ -69,8 +69,21 @@ int (*qemu_main)(void) = os_darwin_cfrunloop_main;
int main(int argc, char **argv)
{
qemu_init(argc, argv);
+
+ /*
+ * qemu_init acquires the BQL and the replay mutex. The BQL is taken when
+ * initializing the CPUs, to block the associated threads until
+ * initialization is complete. The replay mutex is taken during
+ * initialization because it must be held while configuring icount_mode.
+ *
+ * On macOS, the QEMU main event loop runs in a background thread, since
+ * the main thread must be reserved for the UI. We therefore need to
+ * transfer ownership of both locks, and the simplest way to do that is
+ * to release them here and reacquire them from qemu_default_main.
+ */
bql_unlock();
replay_mutex_unlock();
+
if (qemu_main) {
QemuThread main_loop_thread;
qemu_thread_create(&main_loop_thread, "qemu_main",
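
A minimal sketch of the receiving side of the lock handoff described in the comment above, assuming counterpart lock APIs (bql_lock(), replay_mutex_lock()) matching the unlock calls in the hunk; main_loop_entry() is a hypothetical stand-in for the thread function that ends up in qemu_default_main(), not code from this patch.

/* Hypothetical thread entry: reacquire the locks released by main(). */
static void *main_loop_entry(void *arg)
{
    bql_lock();           /* BQL ownership moves to this thread */
    replay_mutex_lock();  /* same for the replay mutex */

    /* ... run the main loop with both locks owned by this thread ... */

    return NULL;
}
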
diff --git a/system/memory.c b/system/memory.c
index 63b983e..76b44b8 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -2106,12 +2106,16 @@ RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
return mr->rdm;
}
-void memory_region_set_ram_discard_manager(MemoryRegion *mr,
- RamDiscardManager *rdm)
+int memory_region_set_ram_discard_manager(MemoryRegion *mr,
+ RamDiscardManager *rdm)
{
g_assert(memory_region_is_ram(mr));
- g_assert(!rdm || !mr->rdm);
+ if (mr->rdm && rdm) {
+ return -EBUSY;
+ }
+
mr->rdm = rdm;
+ return 0;
}
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
@@ -2134,7 +2138,7 @@ bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
MemoryRegionSection *section,
- ReplayRamPopulate replay_fn,
+ ReplayRamDiscardState replay_fn,
void *opaque)
{
RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
@@ -2143,15 +2147,15 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}
-void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
- MemoryRegionSection *section,
- ReplayRamDiscard replay_fn,
- void *opaque)
+int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
{
RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
g_assert(rdmc->replay_discarded);
- rdmc->replay_discarded(rdm, section, replay_fn, opaque);
+ return rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
@@ -2174,18 +2178,14 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
}
/* Called with rcu_read_lock held. */
-bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
- ram_addr_t *ram_addr, bool *read_only,
- bool *mr_has_discard_manager, Error **errp)
+MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p,
+ Error **errp)
{
MemoryRegion *mr;
hwaddr xlat;
hwaddr len = iotlb->addr_mask + 1;
bool writable = iotlb->perm & IOMMU_WO;
- if (mr_has_discard_manager) {
- *mr_has_discard_manager = false;
- }
/*
* The IOMMU TLB entry we have just covers translation through
* this IOMMU to its immediate target. We need to translate
@@ -2195,7 +2195,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
&xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
if (!memory_region_is_ram(mr)) {
error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
- return false;
+ return NULL;
} else if (memory_region_has_ram_discard_manager(mr)) {
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
MemoryRegionSection tmp = {
@@ -2203,9 +2203,6 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
.offset_within_region = xlat,
.size = int128_make64(len),
};
- if (mr_has_discard_manager) {
- *mr_has_discard_manager = true;
- }
/*
* Malicious VMs can map memory into the IOMMU, which is expected
* to remain discarded. vfio will pin all pages, populating memory.
@@ -2216,7 +2213,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
" via virtio-mem): %" HWADDR_PRIx "",
iotlb->translated_addr);
- return false;
+ return NULL;
}
}
@@ -2226,22 +2223,11 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
*/
if (len & iotlb->addr_mask) {
error_setg(errp, "iommu has granularity incompatible with target AS");
- return false;
- }
-
- if (vaddr) {
- *vaddr = memory_region_get_ram_ptr(mr) + xlat;
- }
-
- if (ram_addr) {
- *ram_addr = memory_region_get_ram_addr(mr) + xlat;
- }
-
- if (read_only) {
- *read_only = !writable || mr->readonly;
+ return NULL;
}
- return true;
+ *xlat_p = xlat;
+ return mr;
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
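
Usage note: memory_translate_iotlb() now returns the target MemoryRegion plus the offset into it, and callers derive the values that memory_get_xlat_addr() used to fill in via out-parameters. The sketch below is a hypothetical caller (example_map_iotlb() is illustrative only); it uses just the APIs that already appear in the hunks above.

/* Hypothetical caller; like the old helper, call with the RCU read lock held. */
static bool example_map_iotlb(IOMMUTLBEntry *iotlb, Error **errp)
{
    hwaddr xlat;
    MemoryRegion *mr = memory_translate_iotlb(iotlb, &xlat, errp);

    if (!mr) {
        return false;                    /* errp has already been set */
    }

    void *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    ram_addr_t ram_addr = memory_region_get_ram_addr(mr) + xlat;
    bool read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly;
    bool has_rdm = memory_region_has_ram_discard_manager(mr);

    /* ... map/pin using vaddr, ram_addr, read_only, has_rdm as before ... */
    (void)vaddr; (void)ram_addr; (void)read_only; (void)has_rdm;
    return true;
}
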
diff --git a/system/meson.build b/system/meson.build
index c2f0082..6d21ff9 100644
--- a/system/meson.build
+++ b/system/meson.build
@@ -7,7 +7,7 @@ system_ss.add(files(
'vl.c',
), sdl, libpmem, libdaxctl)
-libsystem_ss.add(files(
+system_ss.add(files(
'balloon.c',
'bootdevice.c',
'cpus.c',
@@ -17,6 +17,7 @@ libsystem_ss.add(files(
'dma-helpers.c',
'globals.c',
'ioport.c',
+ 'ram-block-attributes.c',
'memory_mapping.c',
'memory.c',
'physmem.c',
diff --git a/system/physmem.c b/system/physmem.c
index a8a9ca3..ff0ca40 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -1916,7 +1916,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
}
assert(new_block->guest_memfd < 0);
- ret = ram_block_discard_require(true);
+ ret = ram_block_coordinated_discard_require(true);
if (ret < 0) {
error_setg_errno(errp, -ret,
"cannot set up private guest memory: discard currently blocked");
@@ -1932,6 +1932,24 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
}
/*
+ * The attribute bitmap of the RamBlockAttributes defaults to
+ * discarded, which mimics the behavior of kvm_set_phys_mem() when it
+ * calls kvm_set_memory_attributes_private(). This leads to a brief
+ * period of inconsistency between the creation of the RAMBlock and its
+ * mapping into the physical address space. However, this is not
+ * problematic, as no users rely on the attribute status to perform
+ * any actions during this interval.
+ */
+ new_block->attributes = ram_block_attributes_create(new_block);
+ if (!new_block->attributes) {
+ error_setg(errp, "Failed to create ram block attribute");
+ close(new_block->guest_memfd);
+ ram_block_coordinated_discard_require(false);
+ qemu_mutex_unlock_ramlist();
+ goto out_free;
+ }
+
+ /*
* Add a specific guest_memfd blocker if a generic one would not be
* added by ram_block_add_cpr_blocker.
*/
@@ -2287,8 +2305,9 @@ static void reclaim_ramblock(RAMBlock *block)
}
if (block->guest_memfd >= 0) {
+ ram_block_attributes_destroy(block->attributes);
close(block->guest_memfd);
- ram_block_discard_require(false);
+ ram_block_coordinated_discard_require(false);
}
g_free(block);
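
A condensed, hypothetical view of the pairing introduced above (not an actual physmem.c helper): teardown mirrors setup in reverse order, so the RamBlockAttributes object is destroyed before the guest_memfd is closed and the coordinated-discard requirement is dropped.

/* Hypothetical condensation of the guest_memfd-specific teardown above. */
static void guest_memfd_teardown(RAMBlock *block)
{
    ram_block_attributes_destroy(block->attributes);  /* listeners/bitmap */
    close(block->guest_memfd);                        /* backing memfd    */
    ram_block_coordinated_discard_require(false);     /* drop requirement */
}
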
diff --git a/system/ram-block-attributes.c b/system/ram-block-attributes.c
new file mode 100644
index 0000000..68e8a02
--- /dev/null
+++ b/system/ram-block-attributes.c
@@ -0,0 +1,444 @@
+/*
+ * QEMU ram block attributes
+ *
+ * Copyright Intel
+ *
+ * Author:
+ * Chenyi Qiang <chenyi.qiang@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "system/ramblock.h"
+#include "trace.h"
+
+OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(RamBlockAttributes,
+ ram_block_attributes,
+ RAM_BLOCK_ATTRIBUTES,
+ OBJECT,
+ { TYPE_RAM_DISCARD_MANAGER },
+ { })
+
+static size_t
+ram_block_attributes_get_block_size(const RamBlockAttributes *attr)
+{
+ /*
+ * Because page conversions are performed at a granularity of at least
+ * 4K, or at 4K-aligned sizes, use the host page size as the granularity
+ * for tracking memory attributes.
+ */
+ g_assert(attr && attr->ram_block);
+ g_assert(attr->ram_block->page_size == qemu_real_host_page_size());
+ return attr->ram_block->page_size;
+}
+
+
+static bool
+ram_block_attributes_rdm_is_populated(const RamDiscardManager *rdm,
+ const MemoryRegionSection *section)
+{
+ const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ const uint64_t first_bit = section->offset_within_region / block_size;
+ const uint64_t last_bit =
+ first_bit + int128_get64(section->size) / block_size - 1;
+ unsigned long first_discarded_bit;
+
+ first_discarded_bit = find_next_zero_bit(attr->bitmap, last_bit + 1,
+ first_bit);
+ return first_discarded_bit > last_bit;
+}
+
+typedef int (*ram_block_attributes_section_cb)(MemoryRegionSection *s,
+ void *arg);
+
+static int
+ram_block_attributes_notify_populate_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamDiscardListener *rdl = arg;
+
+ return rdl->notify_populate(rdl, section);
+}
+
+static int
+ram_block_attributes_notify_discard_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamDiscardListener *rdl = arg;
+
+ rdl->notify_discard(rdl, section);
+ return 0;
+}
+
+static int
+ram_block_attributes_for_each_populated_section(const RamBlockAttributes *attr,
+ MemoryRegionSection *section,
+ void *arg,
+ ram_block_attributes_section_cb cb)
+{
+ unsigned long first_bit, last_bit;
+ uint64_t offset, size;
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ int ret = 0;
+
+ first_bit = section->offset_within_region / block_size;
+ first_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit);
+
+ while (first_bit < attr->bitmap_size) {
+ MemoryRegionSection tmp = *section;
+
+ offset = first_bit * block_size;
+ last_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size,
+ first_bit + 1) - 1;
+ size = (last_bit - first_bit + 1) * block_size;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ break;
+ }
+
+ ret = cb(&tmp, arg);
+ if (ret) {
+ error_report("%s: Failed to notify RAM discard listener: %s",
+ __func__, strerror(-ret));
+ break;
+ }
+
+ first_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ last_bit + 2);
+ }
+
+ return ret;
+}
+
+static int
+ram_block_attributes_for_each_discarded_section(const RamBlockAttributes *attr,
+ MemoryRegionSection *section,
+ void *arg,
+ ram_block_attributes_section_cb cb)
+{
+ unsigned long first_bit, last_bit;
+ uint64_t offset, size;
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ int ret = 0;
+
+ first_bit = section->offset_within_region / block_size;
+ first_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size,
+ first_bit);
+
+ while (first_bit < attr->bitmap_size) {
+ MemoryRegionSection tmp = *section;
+
+ offset = first_bit * block_size;
+ last_bit = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit + 1) - 1;
+ size = (last_bit - first_bit + 1) * block_size;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ break;
+ }
+
+ ret = cb(&tmp, arg);
+ if (ret) {
+ error_report("%s: Failed to notify RAM discard listener: %s",
+ __func__, strerror(-ret));
+ break;
+ }
+
+ first_bit = find_next_zero_bit(attr->bitmap,
+ attr->bitmap_size,
+ last_bit + 2);
+ }
+
+ return ret;
+}
+
+static uint64_t
+ram_block_attributes_rdm_get_min_granularity(const RamDiscardManager *rdm,
+ const MemoryRegion *mr)
+{
+ const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+
+ g_assert(mr == attr->ram_block->mr);
+ return ram_block_attributes_get_block_size(attr);
+}
+
+static void
+ram_block_attributes_rdm_register_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl,
+ MemoryRegionSection *section)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ int ret;
+
+ g_assert(section->mr == attr->ram_block->mr);
+ rdl->section = memory_region_section_new_copy(section);
+
+ QLIST_INSERT_HEAD(&attr->rdl_list, rdl, next);
+
+ ret = ram_block_attributes_for_each_populated_section(attr, section, rdl,
+ ram_block_attributes_notify_populate_cb);
+ if (ret) {
+ error_report("%s: Failed to register RAM discard listener: %s",
+ __func__, strerror(-ret));
+ exit(1);
+ }
+}
+
+static void
+ram_block_attributes_rdm_unregister_listener(RamDiscardManager *rdm,
+ RamDiscardListener *rdl)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ int ret;
+
+ g_assert(rdl->section);
+ g_assert(rdl->section->mr == attr->ram_block->mr);
+
+ if (rdl->double_discard_supported) {
+ rdl->notify_discard(rdl, rdl->section);
+ } else {
+ ret = ram_block_attributes_for_each_populated_section(attr,
+ rdl->section, rdl, ram_block_attributes_notify_discard_cb);
+ if (ret) {
+ error_report("%s: Failed to unregister RAM discard listener: %s",
+ __func__, strerror(-ret));
+ exit(1);
+ }
+ }
+
+ memory_region_section_free_copy(rdl->section);
+ rdl->section = NULL;
+ QLIST_REMOVE(rdl, next);
+}
+
+typedef struct RamBlockAttributesReplayData {
+ ReplayRamDiscardState fn;
+ void *opaque;
+} RamBlockAttributesReplayData;
+
+static int ram_block_attributes_rdm_replay_cb(MemoryRegionSection *section,
+ void *arg)
+{
+ RamBlockAttributesReplayData *data = arg;
+
+ return data->fn(section, data->opaque);
+}
+
+static int
+ram_block_attributes_rdm_replay_populated(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque };
+
+ g_assert(section->mr == attr->ram_block->mr);
+ return ram_block_attributes_for_each_populated_section(attr, section, &data,
+ ram_block_attributes_rdm_replay_cb);
+}
+
+static int
+ram_block_attributes_rdm_replay_discarded(const RamDiscardManager *rdm,
+ MemoryRegionSection *section,
+ ReplayRamDiscardState replay_fn,
+ void *opaque)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm);
+ RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque };
+
+ g_assert(section->mr == attr->ram_block->mr);
+ return ram_block_attributes_for_each_discarded_section(attr, section, &data,
+ ram_block_attributes_rdm_replay_cb);
+}
+
+static bool
+ram_block_attributes_is_valid_range(RamBlockAttributes *attr, uint64_t offset,
+ uint64_t size)
+{
+ MemoryRegion *mr = attr->ram_block->mr;
+
+ g_assert(mr);
+
+ uint64_t region_size = memory_region_size(mr);
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+
+ if (!QEMU_IS_ALIGNED(offset, block_size) ||
+ !QEMU_IS_ALIGNED(size, block_size)) {
+ return false;
+ }
+ if (offset + size <= offset) {
+ return false;
+ }
+ if (offset + size > region_size) {
+ return false;
+ }
+ return true;
+}
+
+static void ram_block_attributes_notify_discard(RamBlockAttributes *attr,
+ uint64_t offset,
+ uint64_t size)
+{
+ RamDiscardListener *rdl;
+
+ QLIST_FOREACH(rdl, &attr->rdl_list, next) {
+ MemoryRegionSection tmp = *rdl->section;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ continue;
+ }
+ rdl->notify_discard(rdl, &tmp);
+ }
+}
+
+static int
+ram_block_attributes_notify_populate(RamBlockAttributes *attr,
+ uint64_t offset, uint64_t size)
+{
+ RamDiscardListener *rdl;
+ int ret = 0;
+
+ QLIST_FOREACH(rdl, &attr->rdl_list, next) {
+ MemoryRegionSection tmp = *rdl->section;
+
+ if (!memory_region_section_intersect_range(&tmp, offset, size)) {
+ continue;
+ }
+ ret = rdl->notify_populate(rdl, &tmp);
+ if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int ram_block_attributes_state_change(RamBlockAttributes *attr,
+ uint64_t offset, uint64_t size,
+ bool to_discard)
+{
+ const size_t block_size = ram_block_attributes_get_block_size(attr);
+ const unsigned long first_bit = offset / block_size;
+ const unsigned long nbits = size / block_size;
+ const unsigned long last_bit = first_bit + nbits - 1;
+ const bool is_discarded = find_next_bit(attr->bitmap, attr->bitmap_size,
+ first_bit) > last_bit;
+ const bool is_populated = find_next_zero_bit(attr->bitmap,
+ attr->bitmap_size, first_bit) > last_bit;
+ unsigned long bit;
+ int ret = 0;
+
+ if (!ram_block_attributes_is_valid_range(attr, offset, size)) {
+ error_report("%s, invalid range: offset 0x%" PRIx64 ", size "
+ "0x%" PRIx64, __func__, offset, size);
+ return -EINVAL;
+ }
+
+ trace_ram_block_attributes_state_change(offset, size,
+ is_discarded ? "discarded" :
+ is_populated ? "populated" :
+ "mixture",
+ to_discard ? "discarded" :
+ "populated");
+ if (to_discard) {
+ if (is_discarded) {
+ /* Already private */
+ } else if (is_populated) {
+ /* Completely shared */
+ bitmap_clear(attr->bitmap, first_bit, nbits);
+ ram_block_attributes_notify_discard(attr, offset, size);
+ } else {
+ /* Unexpected mixture: process individual blocks */
+ for (bit = first_bit; bit < first_bit + nbits; bit++) {
+ if (!test_bit(bit, attr->bitmap)) {
+ continue;
+ }
+ clear_bit(bit, attr->bitmap);
+ ram_block_attributes_notify_discard(attr, bit * block_size,
+ block_size);
+ }
+ }
+ } else {
+ if (is_populated) {
+ /* Already shared */
+ } else if (is_discarded) {
+ /* Completely private */
+ bitmap_set(attr->bitmap, first_bit, nbits);
+ ret = ram_block_attributes_notify_populate(attr, offset, size);
+ } else {
+ /* Unexpected mixture: process individual blocks */
+ for (bit = first_bit; bit < first_bit + nbits; bit++) {
+ if (test_bit(bit, attr->bitmap)) {
+ continue;
+ }
+ set_bit(bit, attr->bitmap);
+ ret = ram_block_attributes_notify_populate(attr,
+ bit * block_size,
+ block_size);
+ if (ret) {
+ break;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block)
+{
+ const int block_size = qemu_real_host_page_size();
+ RamBlockAttributes *attr;
+ MemoryRegion *mr = ram_block->mr;
+
+ attr = RAM_BLOCK_ATTRIBUTES(object_new(TYPE_RAM_BLOCK_ATTRIBUTES));
+
+ attr->ram_block = ram_block;
+ if (memory_region_set_ram_discard_manager(mr, RAM_DISCARD_MANAGER(attr))) {
+ object_unref(OBJECT(attr));
+ return NULL;
+ }
+ attr->bitmap_size =
+ ROUND_UP(int128_get64(mr->size), block_size) / block_size;
+ attr->bitmap = bitmap_new(attr->bitmap_size);
+
+ return attr;
+}
+
+void ram_block_attributes_destroy(RamBlockAttributes *attr)
+{
+ g_assert(attr);
+
+ g_free(attr->bitmap);
+ memory_region_set_ram_discard_manager(attr->ram_block->mr, NULL);
+ object_unref(OBJECT(attr));
+}
+
+static void ram_block_attributes_init(Object *obj)
+{
+ RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(obj);
+
+ QLIST_INIT(&attr->rdl_list);
+}
+
+static void ram_block_attributes_finalize(Object *obj)
+{
+}
+
+static void ram_block_attributes_class_init(ObjectClass *klass,
+ const void *data)
+{
+ RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass);
+
+ rdmc->get_min_granularity = ram_block_attributes_rdm_get_min_granularity;
+ rdmc->register_listener = ram_block_attributes_rdm_register_listener;
+ rdmc->unregister_listener = ram_block_attributes_rdm_unregister_listener;
+ rdmc->is_populated = ram_block_attributes_rdm_is_populated;
+ rdmc->replay_populated = ram_block_attributes_rdm_replay_populated;
+ rdmc->replay_discarded = ram_block_attributes_rdm_replay_discarded;
+}
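
Taken together, ram-block-attributes.c implements the RamDiscardManager interface on top of a per-RAMBlock bitmap in which a set bit means populated (shared) and a clear bit means discarded (private). The sketch below shows how a caller would be expected to drive it on a guest-requested conversion; convert_guest_range() is a hypothetical helper for illustration, while ram_block_attributes_state_change() and the RAMBlock attributes field come from this series.

/* Hypothetical driver of the state-change API on a page conversion. */
static int convert_guest_range(RAMBlock *rb, uint64_t offset, uint64_t size,
                               bool to_private)
{
    /*
     * to_discard == true clears bits (the range becomes private/discarded)
     * and fires notify_discard; false sets bits (shared/populated) and
     * fires notify_populate on all registered RamDiscardListeners.
     */
    return ram_block_attributes_state_change(rb->attributes, offset, size,
                                             to_private);
}

Both offset and size must be aligned to the host page size, otherwise the call fails with -EINVAL, as enforced by ram_block_attributes_is_valid_range().
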
diff --git a/system/runstate.c b/system/runstate.c
index de74d96..38900c9 100644
--- a/system/runstate.c
+++ b/system/runstate.c
@@ -590,6 +590,58 @@ static void qemu_system_wakeup(void)
}
}
+static char *tdx_parse_panic_message(char *message)
+{
+ bool printable = false;
+ char *buf = NULL;
+ int len = 0, i;
+
+ /*
+ * Although the message is defined as a JSON string, we shouldn't
+ * unconditionally treat it as such, because the guest generated it and
+ * it is not necessarily trustworthy.
+ */
+ if (message) {
+ /* The caller guarantees the NULL-terminated string. */
+ len = strlen(message);
+
+ printable = len > 0;
+ for (i = 0; i < len; i++) {
+ if (!(0x20 <= message[i] && message[i] <= 0x7e)) {
+ printable = false;
+ break;
+ }
+ }
+ }
+
+ if (len == 0) {
+ buf = g_malloc(1);
+ buf[0] = '\0';
+ } else {
+ if (!printable) {
+ /* 3 = length of "%02x " */
+ buf = g_malloc(len * 3);
+ for (i = 0; i < len; i++) {
+ if (message[i] == '\0') {
+ break;
+ } else {
+ sprintf(buf + 3 * i, "%02x ", message[i]);
+ }
+ }
+ if (i > 0) {
+ /* replace the trailing space with a NUL terminator */
+ buf[i * 3 - 1] = '\0';
+ } else {
+ buf[0] = '\0';
+ }
+ } else {
+ buf = g_strdup(message);
+ }
+ }
+
+ return buf;
+}
+
void qemu_system_guest_panicked(GuestPanicInformation *info)
{
qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed");
@@ -631,7 +683,20 @@ void qemu_system_guest_panicked(GuestPanicInformation *info)
S390CrashReason_str(info->u.s390.reason),
info->u.s390.psw_mask,
info->u.s390.psw_addr);
+ } else if (info->type == GUEST_PANIC_INFORMATION_TYPE_TDX) {
+ char *message = tdx_parse_panic_message(info->u.tdx.message);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "\nTDX guest reports fatal error."
+ " error code: 0x%" PRIx32 " error message:\"%s\"\n",
+ info->u.tdx.error_code, message);
+ g_free(message);
+ if (info->u.tdx.gpa != -1ull) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Additional error information "
+ "can be found at gpa page: 0x%" PRIx64 "\n",
+ info->u.tdx.gpa);
+ }
}
+
qapi_free_GuestPanicInformation(info);
}
}
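
The TDX panic handling above sanitizes a guest-controlled string before logging it. The standalone snippet below illustrates the same policy outside of QEMU (it is not part of the patch): printable ASCII is logged verbatim, anything else is rendered as space-separated hex bytes.

#include <stdio.h>
#include <string.h>

/* Illustration of the sanitization policy; not QEMU code. */
static void log_untrusted(const char *msg)
{
    size_t len = strlen(msg);
    int printable = len > 0;

    for (size_t i = 0; i < len; i++) {
        unsigned char c = (unsigned char)msg[i];
        if (c < 0x20 || c > 0x7e) {
            printable = 0;
            break;
        }
    }

    if (printable) {
        printf("%s\n", msg);
    } else {
        for (size_t i = 0; i < len; i++) {
            printf("%02x%c", (unsigned char)msg[i],
                   i + 1 < len ? ' ' : '\n');
        }
    }
}
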
diff --git a/system/trace-events b/system/trace-events
index be12ebf..82856e4 100644
--- a/system/trace-events
+++ b/system/trace-events
@@ -52,3 +52,6 @@ dirtylimit_state_finalize(void)
dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us"
dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64
dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us"
+
+# ram-block-attributes.c
+ram_block_attributes_state_change(uint64_t offset, uint64_t size, const char *from, const char *to) "offset 0x%"PRIx64" size 0x%"PRIx64" from '%s' to '%s'"
diff --git a/system/vl.c b/system/vl.c
index fd402b8..3b7057e 100644
--- a/system/vl.c
+++ b/system/vl.c
@@ -1192,10 +1192,7 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
return -1;
}
}
- /* For legacy, keep user files in a specific global order. */
- fw_cfg_set_order_override(fw_cfg, FW_CFG_ORDER_OVERRIDE_USER);
fw_cfg_add_file(fw_cfg, name, buf, size);
- fw_cfg_reset_order_override(fw_cfg);
return 0;
}
@@ -2745,7 +2742,6 @@ static void qemu_create_cli_devices(void)
}
/* init generic devices */
- rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE);
qemu_opts_foreach(qemu_find_opts("device"),
device_init_func, NULL, &error_fatal);
QTAILQ_FOREACH(opt, &device_opts, next) {
@@ -2756,7 +2752,6 @@ static void qemu_create_cli_devices(void)
assert(ret_data == NULL); /* error_fatal aborts */
loc_pop(&opt->loc);
}
- rom_reset_order_override();
}
static bool qemu_machine_creation_done(Error **errp)