Diffstat (limited to 'hw/mem')
-rw-r--r--  hw/mem/cxl_type3.c      | 159
-rw-r--r--  hw/mem/memory-device.c  |   4
-rw-r--r--  hw/mem/npcm7xx_mc.c     |   2
-rw-r--r--  hw/mem/nvdimm.c         |   7
-rw-r--r--  hw/mem/pc-dimm.c        |  11
-rw-r--r--  hw/mem/sparse-mem.c     |   8
6 files changed, 134 insertions(+), 57 deletions(-)
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 35ac598..94e7274 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -17,6 +17,7 @@
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -24,11 +25,19 @@
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
+/* type3 device private */
+enum CXL_T3_MSIX_VECTOR {
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS = 0,
+ CXL_T3_MSIX_EVENT_START = 2,
+ CXL_T3_MSIX_MBOX = CXL_T3_MSIX_EVENT_START + CXL_EVENT_TYPE_MAX,
+ CXL_T3_MSIX_VECTOR_NR
+};
+
#define DWORD_BYTE 4
#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
@@ -737,6 +746,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "volatile memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostvmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostvmem)));
+ return false;
+ }
memory_region_set_nonvolatile(vmr, false);
memory_region_set_enabled(vmr, true);
host_memory_backend_set_mapped(ct3d->hostvmem, true);
@@ -760,6 +774,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "persistent memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostpmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostpmem)));
+ return false;
+ }
memory_region_set_nonvolatile(pmr, true);
memory_region_set_enabled(pmr, true);
host_memory_backend_set_mapped(ct3d->hostpmem, true);
@@ -790,6 +809,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->dc.host_dc)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->dc.host_dc)));
+ return false;
+ }
/*
* Set DC regions as volatile for now, non-volatile support can
* be added in the future if needed.
@@ -819,6 +843,19 @@ static DOEProtocol doe_cdat_prot[] = {
{ }
};
+/* Initialize CXL device alerts with default threshold values. */
+static void init_alert_config(CXLType3Dev *ct3d)
+{
+ ct3d->alert_config = (CXLAlertConfig) {
+ .life_used_crit_alert_thresh = 75,
+ .life_used_warn_thresh = 40,
+ .over_temp_crit_alert_thresh = 35,
+ .under_temp_crit_alert_thresh = 10,
+ .over_temp_warn_thresh = 25,
+ .under_temp_warn_thresh = 20
+ };
+}
+
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
ERRP_GUARD();
@@ -827,8 +864,8 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
ComponentRegisters *regs = &cxl_cstate->crb;
MemoryRegion *mr = &regs->component_registers;
uint8_t *pci_conf = pci_dev->config;
- unsigned short msix_num = 6;
int i, rc;
+ uint16_t count;
QTAILQ_INIT(&ct3d->error_list);
@@ -867,39 +904,63 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
&ct3d->cxl_dstate.device_registers);
/* MSI(-X) Initialization */
- rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
+ rc = msix_init_exclusive_bar(pci_dev, CXL_T3_MSIX_VECTOR_NR, 4, errp);
if (rc) {
- goto err_address_space_free;
+ goto err_free_special_ops;
}
- for (i = 0; i < msix_num; i++) {
+ for (i = 0; i < CXL_T3_MSIX_VECTOR_NR; i++) {
msix_vector_use(pci_dev, i);
}
/* DOE Initialization */
- pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);
+ pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true,
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS);
cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
if (!cxl_doe_cdat_init(cxl_cstate, errp)) {
- goto err_free_special_ops;
+ goto err_msix_uninit;
}
+ init_alert_config(ct3d);
pcie_cap_deverr_init(pci_dev);
/* Leave a bit of room for expansion */
- rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
+ rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, errp);
if (rc) {
goto err_release_cdat;
}
- cxl_event_init(&ct3d->cxl_dstate, 2);
+ cxl_event_init(&ct3d->cxl_dstate, CXL_T3_MSIX_EVENT_START);
+
+ /* Set default value for patrol scrub attributes */
+ ct3d->patrol_scrub_attrs.scrub_cycle_cap =
+ CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
+ CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->patrol_scrub_attrs.scrub_cycle =
+ CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
+ (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8);
+ ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT;
+
+ /* Set default value for DDR5 ECS read attributes */
+ ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT;
+ for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
+ ct3d->ecs_attrs.fru_attrs[count].ecs_cap =
+ CXL_ECS_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->ecs_attrs.fru_attrs[count].ecs_config =
+ CXL_ECS_THRESHOLD_COUNT_DEFAULT |
+ (CXL_ECS_MODE_DEFAULT << 3);
+ /* Reserved */
+ ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0;
+ }
return;
err_release_cdat:
cxl_doe_cdat_release(cxl_cstate);
+err_msix_uninit:
+ msix_uninit_exclusive_bar(pci_dev);
err_free_special_ops:
g_free(regs->special_ops);
-err_address_space_free:
if (ct3d->dc.host_dc) {
cxl_destroy_dc_regions(ct3d);
address_space_destroy(&ct3d->dc.host_dc_as);
@@ -910,7 +971,6 @@ err_address_space_free:
if (ct3d->hostvmem) {
address_space_destroy(&ct3d->hostvmem_as);
}
- return;
}
static void ct3_exit(PCIDevice *pci_dev)
@@ -921,7 +981,9 @@ static void ct3_exit(PCIDevice *pci_dev)
pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate);
+ msix_uninit_exclusive_bar(pci_dev);
g_free(regs->special_ops);
+ cxl_destroy_cci(&ct3d->cci);
if (ct3d->dc.host_dc) {
cxl_destroy_dc_regions(ct3d);
address_space_destroy(&ct3d->dc.host_dc_as);
@@ -1052,10 +1114,17 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
continue;
}
- *dpa = dpa_base +
- ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
- ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
- >> iw));
+ if (iw < 8) {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
+ >> iw));
+ } else {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((((MAKE_64BIT_MASK(ig + iw, 64 - ig - iw) & hpa_offset)
+ >> (ig + iw)) / 3) << (ig + 8)));
+ }
return true;
}
@@ -1127,7 +1196,7 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
qemu_guest_getrandom_nofail(data, size);
return MEMTX_OK;
}
@@ -1149,7 +1218,7 @@ MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
return MEMTX_OK;
}
@@ -1162,22 +1231,28 @@ static void ct3d_reset(DeviceState *dev)
uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
+ pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed);
cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
- cxl_device_register_init_t3(ct3d);
+ cxl_device_register_init_t3(ct3d, CXL_T3_MSIX_MBOX);
/*
* Bring up an endpoint to target with MCTP over VDM.
* This device is emulating an MLD with single LD for now.
*/
+ if (ct3d->vdm_fm_owned_ld_mctp_cci.initialized) {
+ cxl_destroy_cci(&ct3d->vdm_fm_owned_ld_mctp_cci);
+ }
cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
+ if (ct3d->ld0_cci.initialized) {
+ cxl_destroy_cci(&ct3d->ld0_cci);
+ }
cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
-
}
-static Property ct3_props[] = {
+static const Property ct3_props[] = {
DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
HostMemoryBackend *), /* for backward compatibility */
DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
@@ -1191,7 +1266,10 @@ static Property ct3_props[] = {
DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev,
+ speed, PCIE_LINK_SPEED_32),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev,
+ width, PCIE_LINK_WIDTH_16),
};
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
@@ -1304,6 +1382,12 @@ void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
+void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d)
+{
+ ct3d->poison_list_overflowed = false;
+ ct3d->poison_list_overflow_ts = 0;
+}
+
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
Error **errp)
{
@@ -1331,28 +1415,28 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
ct3d = CXL_TYPE3(obj);
QLIST_FOREACH(p, &ct3d->poison_list, node) {
- if (((start >= p->start) && (start < p->start + p->length)) ||
- ((start + length > p->start) &&
- (start + length <= p->start + p->length))) {
+ if ((start < p->start + p->length) && (start + length > p->start)) {
error_setg(errp,
"Overlap with existing poisoned region not supported");
return;
}
}
- if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
- cxl_set_poison_list_overflowed(ct3d);
- return;
- }
-
p = g_new0(CXLPoison, 1);
p->length = length;
p->start = start;
/* Different from injected via the mbox */
p->type = CXL_POISON_TYPE_INTERNAL;
- QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
- ct3d->poison_list_cnt++;
+ if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
+ QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
+ ct3d->poison_list_cnt++;
+ } else {
+ if (!ct3d->poison_list_overflowed) {
+ cxl_set_poison_list_overflowed(ct3d);
+ }
+ QLIST_INSERT_HEAD(&ct3d->poison_list_bkp, p, node);
+ }
}
/* For uncorrectable errors include support for multiple header recording */
@@ -1446,8 +1530,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path,
stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
pcie_aer_inject_error(PCI_DEVICE(obj), &err);
-
- return;
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
@@ -1723,7 +1805,6 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
cxl_event_irq_assert(ct3d);
}
- return;
}
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
@@ -2014,11 +2095,11 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
stw_le_p(&dCap.host_id, hid);
/* only valid for DC_REGION_CONFIG_UPDATED event */
dCap.updated_region_id = 0;
- dCap.flags = 0;
for (i = 0; i < num_extents; i++) {
memcpy(&dCap.dynamic_capacity_extent, &extents[i],
sizeof(CXLDCExtentRaw));
+ dCap.flags = 0;
if (i < num_extents - 1) {
/* Set "More" flag */
dCap.flags |= BIT(0);
@@ -2080,7 +2161,7 @@ void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id,
}
}
-static void ct3_class_init(ObjectClass *oc, void *data)
+static void ct3_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -2098,7 +2179,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "CXL Memory Device (Type 3)";
- dc->reset = ct3d_reset;
+ device_class_set_legacy_reset(dc, ct3d_reset);
device_class_set_props(dc, ct3_props);
cvc->get_lsa_size = get_lsa_size;
@@ -2113,7 +2194,7 @@ static const TypeInfo ct3d_info = {
.class_size = sizeof(struct CXLType3Class),
.class_init = ct3_class_init,
.instance_size = sizeof(CXLType3Dev),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CXL_DEVICE },
{ INTERFACE_PCIE_DEVICE },
{}
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index a5f279a..1a432e9 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -16,8 +16,8 @@
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
-#include "sysemu/kvm.h"
-#include "exec/address-spaces.h"
+#include "system/kvm.h"
+#include "system/address-spaces.h"
#include "trace.h"
static bool memory_device_is_empty(const MemoryDeviceState *md)
diff --git a/hw/mem/npcm7xx_mc.c b/hw/mem/npcm7xx_mc.c
index abc5af5..07fc108 100644
--- a/hw/mem/npcm7xx_mc.c
+++ b/hw/mem/npcm7xx_mc.c
@@ -65,7 +65,7 @@ static void npcm7xx_mc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
}
-static void npcm7xx_mc_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_mc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 1631a7d..23ab143e 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -30,7 +30,7 @@
#include "hw/mem/nvdimm.h"
#include "hw/qdev-properties.h"
#include "hw/mem/memory-device.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
@@ -246,12 +246,11 @@ static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf,
memory_region_set_dirty(mr, backend_offset, size);
}
-static Property nvdimm_properties[] = {
+static const Property nvdimm_properties[] = {
DEFINE_PROP_BOOL(NVDIMM_UNARMED_PROP, NVDIMMDevice, unarmed, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvdimm_class_init(ObjectClass *oc, void *data)
+static void nvdimm_class_init(ObjectClass *oc, const void *data)
{
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 27919ca..f701d5b 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -28,8 +28,8 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "trace.h"
static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
@@ -150,14 +150,13 @@ out:
return slot;
}
-static Property pc_dimm_properties[] = {
+static const Property pc_dimm_properties[] = {
DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0),
DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0),
DEFINE_PROP_INT32(PC_DIMM_SLOT_PROP, PCDIMMDevice, slot,
PC_DIMM_UNASSIGNED_SLOT),
DEFINE_PROP_LINK(PC_DIMM_MEMDEV_PROP, PCDIMMDevice, hostmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name,
@@ -277,7 +276,7 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md,
}
}
-static void pc_dimm_class_init(ObjectClass *oc, void *data)
+static void pc_dimm_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
@@ -302,7 +301,7 @@ static const TypeInfo pc_dimm_info = {
.instance_init = pc_dimm_init,
.class_init = pc_dimm_class_init,
.class_size = sizeof(PCDIMMDeviceClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_MEMORY_DEVICE },
{ }
},
diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c
index 6e8f4f8..d7b00e5 100644
--- a/hw/mem/sparse-mem.c
+++ b/hw/mem/sparse-mem.c
@@ -17,7 +17,7 @@
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/units.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/mem/sparse-mem.h"
#define SPARSE_MEM(obj) OBJECT_CHECK(SparseMemState, (obj), TYPE_SPARSE_MEM)
@@ -82,7 +82,6 @@ static void sparse_mem_enter_reset(Object *obj, ResetType type)
{
SparseMemState *s = SPARSE_MEM(obj);
g_hash_table_remove_all(s->mapped);
- return;
}
static const MemoryRegionOps sparse_mem_ops = {
@@ -96,14 +95,13 @@ static const MemoryRegionOps sparse_mem_ops = {
},
};
-static Property sparse_mem_properties[] = {
+static const Property sparse_mem_properties[] = {
/* The base address of the memory */
DEFINE_PROP_UINT64("baseaddr", SparseMemState, baseaddr, 0x0),
/* The length of the sparse memory region */
DEFINE_PROP_UINT64("length", SparseMemState, length, UINT64_MAX),
/* Max amount of actual memory that can be used to back the sparse memory */
DEFINE_PROP_UINT64("maxsize", SparseMemState, maxsize, 10 * MiB),
- DEFINE_PROP_END_OF_LIST(),
};
MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length)
@@ -138,7 +136,7 @@ static void sparse_mem_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->mmio);
}
-static void sparse_mem_class_init(ObjectClass *klass, void *data)
+static void sparse_mem_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);