Diffstat (limited to 'hw/mem')
-rw-r--r--  hw/mem/cxl_type3.c        | 794
-rw-r--r--  hw/mem/cxl_type3_stubs.c  |  25
-rw-r--r--  hw/mem/memory-device.c    |   4
-rw-r--r--  hw/mem/npcm7xx_mc.c       |   2
-rw-r--r--  hw/mem/nvdimm.c           |   7
-rw-r--r--  hw/mem/pc-dimm.c          |  11
-rw-r--r--  hw/mem/sparse-mem.c       |   8
7 files changed, 757 insertions(+), 94 deletions(-)
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 3e42490..94e7274 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -17,6 +17,7 @@
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -24,12 +25,21 @@
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "qemu/guest-random.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
+/* type3 device private */
+enum CXL_T3_MSIX_VECTOR {
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS = 0,
+ CXL_T3_MSIX_EVENT_START = 2,
+ CXL_T3_MSIX_MBOX = CXL_T3_MSIX_EVENT_START + CXL_EVENT_TYPE_MAX,
+ CXL_T3_MSIX_VECTOR_NR
+};
+
#define DWORD_BYTE 4
+#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
/* Default CDAT entries for a memory region */
enum {
@@ -43,8 +53,9 @@ enum {
};
static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
- int dsmad_handle, MemoryRegion *mr,
- bool is_pmem, uint64_t dpa_base)
+ int dsmad_handle, uint64_t size,
+ bool is_pmem, bool is_dynamic,
+ uint64_t dpa_base)
{
CDATDsmas *dsmas;
CDATDslbis *dslbis0;
@@ -60,9 +71,10 @@ static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
.length = sizeof(*dsmas),
},
.DSMADhandle = dsmad_handle,
- .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
+ .flags = (is_pmem ? CDAT_DSMAS_FLAG_NV : 0) |
+ (is_dynamic ? CDAT_DSMAS_FLAG_DYNAMIC_CAP : 0),
.DPA_base = dpa_base,
- .DPA_length = memory_region_size(mr),
+ .DPA_length = size,
};
/* For now, no memory side cache, plausiblish numbers */
@@ -131,7 +143,7 @@ static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
*/
.EFI_memory_type_attr = is_pmem ? 2 : 1,
.DPA_offset = 0,
- .DPA_length = memory_region_size(mr),
+ .DPA_length = size,
};
/* Header always at start of structure */
@@ -148,11 +160,13 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
g_autofree CDATSubHeader **table = NULL;
CXLType3Dev *ct3d = priv;
MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
+ MemoryRegion *dc_mr = NULL;
+ uint64_t vmr_size = 0, pmr_size = 0;
int dsmad_handle = 0;
int cur_ent = 0;
int len = 0;
- if (!ct3d->hostpmem && !ct3d->hostvmem) {
+ if (!ct3d->hostpmem && !ct3d->hostvmem && !ct3d->dc.num_regions) {
return 0;
}
@@ -162,6 +176,7 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
return -EINVAL;
}
len += CT3_CDAT_NUM_ENTRIES;
+ vmr_size = memory_region_size(volatile_mr);
}
if (ct3d->hostpmem) {
@@ -170,23 +185,57 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
return -EINVAL;
}
len += CT3_CDAT_NUM_ENTRIES;
+ pmr_size = memory_region_size(nonvolatile_mr);
+ }
+
+ if (ct3d->dc.num_regions) {
+ if (!ct3d->dc.host_dc) {
+ return -EINVAL;
+ }
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ if (!dc_mr) {
+ return -EINVAL;
+ }
+ len += CT3_CDAT_NUM_ENTRIES * ct3d->dc.num_regions;
}
table = g_malloc0(len * sizeof(*table));
/* Now fill them in */
if (volatile_mr) {
- ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
- false, 0);
+ ct3_build_cdat_entries_for_mr(table, dsmad_handle++, vmr_size,
+ false, false, 0);
cur_ent = CT3_CDAT_NUM_ENTRIES;
}
if (nonvolatile_mr) {
- uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
+ uint64_t base = vmr_size;
ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
- nonvolatile_mr, true, base);
+ pmr_size, true, false, base);
cur_ent += CT3_CDAT_NUM_ENTRIES;
}
+
+ if (dc_mr) {
+ int i;
+ uint64_t region_base = vmr_size + pmr_size;
+
+ /*
+ * We assume the dynamic capacity to be volatile for now.
+ * Non-volatile dynamic capacity will be added if needed in the
+ * future.
+ */
+ for (i = 0; i < ct3d->dc.num_regions; i++) {
+ ct3_build_cdat_entries_for_mr(&(table[cur_ent]),
+ dsmad_handle++,
+ ct3d->dc.regions[i].len,
+ false, true, region_base);
+ ct3d->dc.regions[i].dsmadhandle = dsmad_handle - 1;
+
+ cur_ent += CT3_CDAT_NUM_ENTRIES;
+ region_base += ct3d->dc.regions[i].len;
+ }
+ }
+
assert(len == cur_ent);
*cdat_table = g_steal_pointer(&table);
@@ -297,10 +346,17 @@ static void build_dvsecs(CXLType3Dev *ct3d)
range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
}
- } else {
+ } else if (ct3d->hostpmem) {
range1_size_hi = ct3d->hostpmem->size >> 32;
range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
+ } else {
+        /*
+         * For a DCD with no static memory, set the memory active and memory
+         * class bits. No range is set.
+         */
+ range1_size_hi = 0;
+ range1_size_lo = (2 << 5) | (2 << 2) | 0x3;
}
dvsec = (uint8_t *)&(CXLDVSECDevice){
@@ -567,11 +623,103 @@ static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
}
}
+/*
+ * TODO: DC region configuration will be updated once host backend and address
+ * space support is added for DCD.
+ */
+static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
+{
+ int i;
+ uint64_t region_base = 0;
+ uint64_t region_len;
+ uint64_t decode_len;
+ uint64_t blk_size = 2 * MiB;
+ CXLDCRegion *region;
+ MemoryRegion *mr;
+ uint64_t dc_size;
+
+ mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(mr);
+ region_len = DIV_ROUND_UP(dc_size, ct3d->dc.num_regions);
+
+    if (dc_size % (ct3d->dc.num_regions * CXL_CAPACITY_MULTIPLIER) != 0) {
+        error_setg(errp,
+                   "backend size is not a multiple of region len: 0x%" PRIx64,
+                   region_len);
+        return false;
+    }
+    if (region_len % CXL_CAPACITY_MULTIPLIER != 0) {
+        error_setg(errp, "DC region size is not aligned to 0x%" PRIx64,
+                   CXL_CAPACITY_MULTIPLIER);
+        return false;
+    }
+ decode_len = region_len;
+
+ if (ct3d->hostvmem) {
+ mr = host_memory_backend_get_memory(ct3d->hostvmem);
+ region_base += memory_region_size(mr);
+ }
+ if (ct3d->hostpmem) {
+ mr = host_memory_backend_get_memory(ct3d->hostpmem);
+ region_base += memory_region_size(mr);
+ }
+ if (region_base % CXL_CAPACITY_MULTIPLIER != 0) {
+ error_setg(errp, "DC region base not aligned to 0x%" PRIx64,
+ CXL_CAPACITY_MULTIPLIER);
+ return false;
+ }
+
+ for (i = 0, region = &ct3d->dc.regions[0];
+ i < ct3d->dc.num_regions;
+ i++, region++, region_base += region_len) {
+ *region = (CXLDCRegion) {
+ .base = region_base,
+ .decode_len = decode_len,
+ .len = region_len,
+ .block_size = blk_size,
+ /* dsmad_handle set when creating CDAT table entries */
+ .flags = 0,
+ };
+ ct3d->dc.total_capacity += region->len;
+ region->blk_bitmap = bitmap_new(region->len / region->block_size);
+ }
+ QTAILQ_INIT(&ct3d->dc.extents);
+ QTAILQ_INIT(&ct3d->dc.extents_pending);
+
+ return true;
+}
+
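
To make the carving arithmetic above concrete, here is a standalone sketch
(the 2 GiB backend and two regions are assumed values; with no static
volatile/persistent memory the first region base starts at DPA 0):

    /* Sketch of the region-carving math in cxl_create_dc_regions();
     * backend size, region count and block size are assumptions. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MiB (1024ULL * 1024)
    #define CXL_CAPACITY_MULTIPLIER (256 * MiB)

    int main(void)
    {
        uint64_t dc_size = 2048 * MiB;  /* assumed volatile-dc-memdev size */
        uint64_t num_regions = 2;       /* assumed num-dc-regions */
        uint64_t blk_size = 2 * MiB;
        uint64_t region_len = (dc_size + num_regions - 1) / num_regions;

        /* Same alignment rules the function enforces */
        if (dc_size % (num_regions * CXL_CAPACITY_MULTIPLIER) ||
            region_len % CXL_CAPACITY_MULTIPLIER) {
            fprintf(stderr, "unaligned backend or region size\n");
            return 1;
        }
        for (uint64_t i = 0; i < num_regions; i++) {
            printf("region %" PRIu64 ": base=0x%" PRIx64 " len=0x%" PRIx64
                   " bitmap_bits=%" PRIu64 "\n",
                   i, i * region_len, region_len, region_len / blk_size);
        }
        return 0;
    }

With these numbers each region decodes 1 GiB and its blk_bitmap tracks 512
blocks of 2 MiB.
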
+static void cxl_destroy_dc_regions(CXLType3Dev *ct3d)
+{
+ CXLDCExtent *ent, *ent_next;
+ CXLDCExtentGroup *group, *group_next;
+ int i;
+ CXLDCRegion *region;
+
+ QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
+ cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
+ }
+
+ QTAILQ_FOREACH_SAFE(group, &ct3d->dc.extents_pending, node, group_next) {
+ QTAILQ_REMOVE(&ct3d->dc.extents_pending, group, node);
+ QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
+ cxl_remove_extent_from_extent_list(&group->list, ent);
+ }
+ g_free(group);
+ }
+
+ for (i = 0; i < ct3d->dc.num_regions; i++) {
+ region = &ct3d->dc.regions[i];
+ g_free(region->blk_bitmap);
+ }
+}
+
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
DeviceState *ds = DEVICE(ct3d);
- if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
+ if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem
+ && !ct3d->dc.num_regions) {
error_setg(errp, "at least one memdev property must be set");
return false;
} else if (ct3d->hostmem && ct3d->hostpmem) {
@@ -598,6 +746,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "volatile memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostvmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostvmem)));
+ return false;
+ }
memory_region_set_nonvolatile(vmr, false);
memory_region_set_enabled(vmr, true);
host_memory_backend_set_mapped(ct3d->hostvmem, true);
@@ -608,7 +761,7 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
}
address_space_init(&ct3d->hostvmem_as, vmr, v_name);
ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
- ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
+ ct3d->cxl_dstate.static_mem_size += memory_region_size(vmr);
g_free(v_name);
}
@@ -621,6 +774,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
error_setg(errp, "persistent memdev must have backing device");
return false;
}
+ if (host_memory_backend_is_mapped(ct3d->hostpmem)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->hostpmem)));
+ return false;
+ }
memory_region_set_nonvolatile(pmr, true);
memory_region_set_enabled(pmr, true);
host_memory_backend_set_mapped(ct3d->hostpmem, true);
@@ -631,10 +789,52 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
}
address_space_init(&ct3d->hostpmem_as, pmr, p_name);
ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
- ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
+ ct3d->cxl_dstate.static_mem_size += memory_region_size(pmr);
g_free(p_name);
}
+ ct3d->dc.total_capacity = 0;
+ if (ct3d->dc.num_regions > 0) {
+ MemoryRegion *dc_mr;
+ char *dc_name;
+
+ if (!ct3d->dc.host_dc) {
+ error_setg(errp, "dynamic capacity must have a backing device");
+ return false;
+ }
+
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ if (!dc_mr) {
+ error_setg(errp, "dynamic capacity must have a backing device");
+ return false;
+ }
+
+ if (host_memory_backend_is_mapped(ct3d->dc.host_dc)) {
+ error_setg(errp, "memory backend %s can't be used multiple times.",
+ object_get_canonical_path_component(OBJECT(ct3d->dc.host_dc)));
+ return false;
+ }
+    /*
+     * Set DC regions as volatile for now; non-volatile support can
+     * be added in the future if needed.
+     */
+ memory_region_set_nonvolatile(dc_mr, false);
+ memory_region_set_enabled(dc_mr, true);
+ host_memory_backend_set_mapped(ct3d->dc.host_dc, true);
+ if (ds->id) {
+ dc_name = g_strdup_printf("cxl-dcd-dpa-dc-space:%s", ds->id);
+ } else {
+ dc_name = g_strdup("cxl-dcd-dpa-dc-space");
+ }
+ address_space_init(&ct3d->dc.host_dc_as, dc_mr, dc_name);
+ g_free(dc_name);
+
+ if (!cxl_create_dc_regions(ct3d, errp)) {
+        error_append_hint(errp, "setting up DC regions failed\n");
+ return false;
+ }
+ }
+
return true;
}
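
For reference, a DCD along these lines could be brought up roughly as follows
(a hedged sketch: the volatile-memdev, volatile-dc-memdev and num-dc-regions
property spellings come from the ct3_props changes later in this patch, while
the bus name, object ids and sizes are made-up placeholders and the usual CXL
fixed-memory-window/root-port plumbing is elided):

    qemu-system-x86_64 ... \
        -object memory-backend-ram,id=vmem0,size=256M \
        -object memory-backend-ram,id=dcmem0,size=2G \
        -device cxl-type3,bus=root_port0,volatile-memdev=vmem0,volatile-dc-memdev=dcmem0,num-dc-regions=2,id=cxl-dcd0
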
@@ -643,6 +843,19 @@ static DOEProtocol doe_cdat_prot[] = {
{ }
};
+/* Initialize CXL device alerts with default threshold values. */
+static void init_alert_config(CXLType3Dev *ct3d)
+{
+ ct3d->alert_config = (CXLAlertConfig) {
+ .life_used_crit_alert_thresh = 75,
+ .life_used_warn_thresh = 40,
+ .over_temp_crit_alert_thresh = 35,
+ .under_temp_crit_alert_thresh = 10,
+ .over_temp_warn_thresh = 25,
+ .under_temp_warn_thresh = 20
+ };
+}
+
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
ERRP_GUARD();
@@ -651,8 +864,8 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
ComponentRegisters *regs = &cxl_cstate->crb;
MemoryRegion *mr = &regs->component_registers;
uint8_t *pci_conf = pci_dev->config;
- unsigned short msix_num = 6;
int i, rc;
+ uint16_t count;
QTAILQ_INIT(&ct3d->error_list);
@@ -691,46 +904,73 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
&ct3d->cxl_dstate.device_registers);
/* MSI(-X) Initialization */
- rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
+ rc = msix_init_exclusive_bar(pci_dev, CXL_T3_MSIX_VECTOR_NR, 4, errp);
if (rc) {
- goto err_address_space_free;
+ goto err_free_special_ops;
}
- for (i = 0; i < msix_num; i++) {
+ for (i = 0; i < CXL_T3_MSIX_VECTOR_NR; i++) {
msix_vector_use(pci_dev, i);
}
/* DOE Initialization */
- pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);
+ pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true,
+ CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS);
cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
if (!cxl_doe_cdat_init(cxl_cstate, errp)) {
- goto err_free_special_ops;
+ goto err_msix_uninit;
}
+ init_alert_config(ct3d);
pcie_cap_deverr_init(pci_dev);
/* Leave a bit of room for expansion */
- rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
+ rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, errp);
if (rc) {
goto err_release_cdat;
}
- cxl_event_init(&ct3d->cxl_dstate, 2);
+ cxl_event_init(&ct3d->cxl_dstate, CXL_T3_MSIX_EVENT_START);
+
+ /* Set default value for patrol scrub attributes */
+ ct3d->patrol_scrub_attrs.scrub_cycle_cap =
+ CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
+ CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->patrol_scrub_attrs.scrub_cycle =
+ CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
+ (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8);
+ ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT;
+
+ /* Set default value for DDR5 ECS read attributes */
+ ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT;
+ for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
+ ct3d->ecs_attrs.fru_attrs[count].ecs_cap =
+ CXL_ECS_REALTIME_REPORT_CAP_DEFAULT;
+ ct3d->ecs_attrs.fru_attrs[count].ecs_config =
+ CXL_ECS_THRESHOLD_COUNT_DEFAULT |
+ (CXL_ECS_MODE_DEFAULT << 3);
+ /* Reserved */
+ ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0;
+ }
return;
err_release_cdat:
cxl_doe_cdat_release(cxl_cstate);
+err_msix_uninit:
+ msix_uninit_exclusive_bar(pci_dev);
err_free_special_ops:
g_free(regs->special_ops);
-err_address_space_free:
+ if (ct3d->dc.host_dc) {
+ cxl_destroy_dc_regions(ct3d);
+ address_space_destroy(&ct3d->dc.host_dc_as);
+ }
if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
if (ct3d->hostvmem) {
address_space_destroy(&ct3d->hostvmem_as);
}
- return;
}
static void ct3_exit(PCIDevice *pci_dev)
@@ -741,7 +981,13 @@ static void ct3_exit(PCIDevice *pci_dev)
pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate);
+ msix_uninit_exclusive_bar(pci_dev);
g_free(regs->special_ops);
+ cxl_destroy_cci(&ct3d->cci);
+ if (ct3d->dc.host_dc) {
+ cxl_destroy_dc_regions(ct3d);
+ address_space_destroy(&ct3d->dc.host_dc_as);
+ }
if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
@@ -750,6 +996,70 @@ static void ct3_exit(PCIDevice *pci_dev)
}
}
+/*
+ * Mark the DPA range [dpa, dpa + len - 1] as backed and accessible. This
+ * happens when a DC extent is added and accepted by the host.
+ */
+void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
+ uint64_t len)
+{
+ CXLDCRegion *region;
+
+ region = cxl_find_dc_region(ct3d, dpa, len);
+ if (!region) {
+ return;
+ }
+
+ bitmap_set(region->blk_bitmap, (dpa - region->base) / region->block_size,
+ len / region->block_size);
+}
+
+/*
+ * Check whether the DPA range [dpa, dpa + len - 1] is backed by DC extents.
+ * Used when validating reads/writes to DC regions.
+ */
+bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
+ uint64_t len)
+{
+ CXLDCRegion *region;
+ uint64_t nbits;
+ long nr;
+
+ region = cxl_find_dc_region(ct3d, dpa, len);
+ if (!region) {
+ return false;
+ }
+
+ nr = (dpa - region->base) / region->block_size;
+ nbits = DIV_ROUND_UP(len, region->block_size);
+    /*
+     * If the bits covering [dpa, dpa + len) are all 1s, the DPA range is
+     * backed by DC extents and we return true; otherwise return false.
+     */
+ return find_next_zero_bit(region->blk_bitmap, nr + nbits, nr) == nr + nbits;
+}
+
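
The backing test above hinges on find_next_zero_bit() returning the end of the
search window (nr + nbits) when every bit in it is set. The same check can be
illustrated self-contained with a plain 64-bit mask (block numbers and the
query range are assumptions):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True iff bits [nr, nr + nbits) are all set, mirroring the
     * find_next_zero_bit(..., nr + nbits, nr) == nr + nbits check above.
     * Assumes nr + nbits < 64 to keep the mask arithmetic simple. */
    static bool blocks_backed(uint64_t bitmap, unsigned nr, unsigned nbits)
    {
        uint64_t mask = ((1ULL << nbits) - 1) << nr;
        return (bitmap & mask) == mask;
    }

    int main(void)
    {
        uint64_t bitmap = 0xFULL << 2;         /* blocks 2..5 backed */
        assert(blocks_backed(bitmap, 2, 2));   /* [4 MiB, 8 MiB), 2 MiB blocks */
        assert(!blocks_backed(bitmap, 0, 2));  /* [0, 4 MiB) unbacked */
        return 0;
    }
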
+/*
+ * Mark the DPA range [dpa, dpa + len - 1] as unbacked and inaccessible.
+ * This happens when a DC extent is released by the host.
+ */
+void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
+ uint64_t len)
+{
+ CXLDCRegion *region;
+ uint64_t nbits;
+ long nr;
+
+ region = cxl_find_dc_region(ct3d, dpa, len);
+ if (!region) {
+ return;
+ }
+
+ nr = (dpa - region->base) / region->block_size;
+ nbits = len / region->block_size;
+ bitmap_clear(region->blk_bitmap, nr, nbits);
+}
+
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
@@ -804,10 +1114,17 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
continue;
}
- *dpa = dpa_base +
- ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
- ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
- >> iw));
+ if (iw < 8) {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
+ >> iw));
+ } else {
+ *dpa = dpa_base +
+ ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
+ ((((MAKE_64BIT_MASK(ig + iw, 64 - ig - iw) & hpa_offset)
+ >> (ig + iw)) / 3) << (ig + 8)));
+ }
return true;
}
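
A worked example of the power-of-2 branch above, under assumed decoder
parameters ig = 0 (256-byte granularity) and iw = 1 (2-way): HPA offset 0x300
is this device's second 256-byte chunk, so it must decode to DPA offset 0x100.
The new iw >= 8 branch covers the 3-, 6- and 12-way cases, where the
interleave round is recovered by dividing the high bits by 3 rather than
shifting:

    /* Standalone check of the HPA->DPA math; ig, iw and the offset are
     * assumed values, and MAKE_64BIT_MASK is redefined locally. */
    #include <assert.h>
    #include <stdint.h>

    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        unsigned ig = 0, iw = 1;        /* 256 B granularity, 2-way */
        uint64_t hpa_offset = 0x300, dpa_base = 0;

        uint64_t dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        assert(dpa == 0x100);
        return 0;
    }
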
@@ -820,16 +1137,23 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
AddressSpace **as,
uint64_t *dpa_offset)
{
- MemoryRegion *vmr = NULL, *pmr = NULL;
+ MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
+ uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
if (ct3d->hostvmem) {
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
+ vmr_size = memory_region_size(vmr);
}
if (ct3d->hostpmem) {
pmr = host_memory_backend_get_memory(ct3d->hostpmem);
+ pmr_size = memory_region_size(pmr);
+ }
+ if (ct3d->dc.host_dc) {
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(dc_mr);
}
- if (!vmr && !pmr) {
+ if (!vmr && !pmr && !dc_mr) {
return -ENODEV;
}
@@ -837,19 +1161,22 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
return -EINVAL;
}
- if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
+ if (*dpa_offset >= vmr_size + pmr_size + dc_size) {
return -EINVAL;
}
- if (vmr) {
- if (*dpa_offset < memory_region_size(vmr)) {
- *as = &ct3d->hostvmem_as;
- } else {
- *as = &ct3d->hostpmem_as;
- *dpa_offset -= memory_region_size(vmr);
- }
- } else {
+ if (*dpa_offset < vmr_size) {
+ *as = &ct3d->hostvmem_as;
+ } else if (*dpa_offset < vmr_size + pmr_size) {
*as = &ct3d->hostpmem_as;
+ *dpa_offset -= vmr_size;
+ } else {
+ if (!ct3_test_region_block_backed(ct3d, *dpa_offset, size)) {
+ return -ENODEV;
+ }
+
+ *as = &ct3d->dc.host_dc_as;
+ *dpa_offset -= (vmr_size + pmr_size);
}
return 0;
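
The ordering above fixes the device's DPA map: [0, vmr_size) resolves to the
volatile backend, [vmr_size, vmr_size + pmr_size) to the persistent one, and
anything beyond that to the DC address space, subject to the block-backing
check. A small sketch of the arithmetic with assumed 256 MiB volatile and
256 MiB persistent backends:

    #include <assert.h>
    #include <stdint.h>

    #define MiB (1024ULL * 1024)

    int main(void)
    {
        uint64_t vmr_size = 256 * MiB, pmr_size = 256 * MiB;
        uint64_t dpa = 600 * MiB;

        assert(dpa >= vmr_size + pmr_size);               /* DC space */
        assert(dpa - (vmr_size + pmr_size) == 88 * MiB);  /* DC offset */
        return 0;
    }
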
@@ -869,7 +1196,7 @@ MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
qemu_guest_getrandom_nofail(data, size);
return MEMTX_OK;
}
@@ -891,7 +1218,7 @@ MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
return MEMTX_ERROR;
}
- if (sanitize_running(&ct3d->cci)) {
+ if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
return MEMTX_OK;
}
@@ -904,22 +1231,28 @@ static void ct3d_reset(DeviceState *dev)
uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
+ pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed);
cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
- cxl_device_register_init_t3(ct3d);
+ cxl_device_register_init_t3(ct3d, CXL_T3_MSIX_MBOX);
/*
* Bring up an endpoint to target with MCTP over VDM.
* This device is emulating an MLD with single LD for now.
*/
+ if (ct3d->vdm_fm_owned_ld_mctp_cci.initialized) {
+ cxl_destroy_cci(&ct3d->vdm_fm_owned_ld_mctp_cci);
+ }
cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
+ if (ct3d->ld0_cci.initialized) {
+ cxl_destroy_cci(&ct3d->ld0_cci);
+ }
cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
512); /* Max payload made up */
-
}
-static Property ct3_props[] = {
+static const Property ct3_props[] = {
DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
HostMemoryBackend *), /* for backward compatibility */
DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
@@ -930,7 +1263,13 @@ static Property ct3_props[] = {
HostMemoryBackend *),
DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
+ DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
+ TYPE_MEMORY_BACKEND, HostMemoryBackend *),
+ DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev,
+ speed, PCIE_LINK_SPEED_32),
+ DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev,
+ width, PCIE_LINK_WIDTH_16),
};
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
@@ -996,36 +1335,42 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
- MemoryRegion *vmr = NULL, *pmr = NULL;
+ MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
AddressSpace *as;
+ uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
if (ct3d->hostvmem) {
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
+ vmr_size = memory_region_size(vmr);
}
if (ct3d->hostpmem) {
pmr = host_memory_backend_get_memory(ct3d->hostpmem);
+ pmr_size = memory_region_size(pmr);
}
+ if (ct3d->dc.host_dc) {
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(dc_mr);
+ }
- if (!vmr && !pmr) {
+ if (!vmr && !pmr && !dc_mr) {
return false;
}
- if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
+ if (dpa_offset + CXL_CACHE_LINE_SIZE > vmr_size + pmr_size + dc_size) {
return false;
}
- if (vmr) {
- if (dpa_offset < memory_region_size(vmr)) {
- as = &ct3d->hostvmem_as;
- } else {
- as = &ct3d->hostpmem_as;
- dpa_offset -= memory_region_size(vmr);
- }
- } else {
+ if (dpa_offset < vmr_size) {
+ as = &ct3d->hostvmem_as;
+ } else if (dpa_offset < vmr_size + pmr_size) {
as = &ct3d->hostpmem_as;
+ dpa_offset -= vmr_size;
+ } else {
+ as = &ct3d->dc.host_dc_as;
+ dpa_offset -= (vmr_size + pmr_size);
}
- address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data,
+ address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
CXL_CACHE_LINE_SIZE);
return true;
}
@@ -1037,6 +1382,12 @@ void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
cxl_device_get_timestamp(&ct3d->cxl_dstate);
}
+void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d)
+{
+ ct3d->poison_list_overflowed = false;
+ ct3d->poison_list_overflow_ts = 0;
+}
+
void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
Error **errp)
{
@@ -1064,28 +1415,28 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
ct3d = CXL_TYPE3(obj);
QLIST_FOREACH(p, &ct3d->poison_list, node) {
- if (((start >= p->start) && (start < p->start + p->length)) ||
- ((start + length > p->start) &&
- (start + length <= p->start + p->length))) {
+ if ((start < p->start + p->length) && (start + length > p->start)) {
error_setg(errp,
"Overlap with existing poisoned region not supported");
return;
}
}
- if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
- cxl_set_poison_list_overflowed(ct3d);
- return;
- }
-
p = g_new0(CXLPoison, 1);
p->length = length;
p->start = start;
/* Different from injected via the mbox */
p->type = CXL_POISON_TYPE_INTERNAL;
- QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
- ct3d->poison_list_cnt++;
+ if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
+ QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
+ ct3d->poison_list_cnt++;
+ } else {
+ if (!ct3d->poison_list_overflowed) {
+ cxl_set_poison_list_overflowed(ct3d);
+ }
+ QLIST_INSERT_HEAD(&ct3d->poison_list_bkp, p, node);
+ }
}
/* For uncorrectable errors include support for multiple header recording */
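
The rewritten loop condition above is the standard half-open interval overlap
predicate; unlike the old two-clause test it also catches a new range that
fully contains an existing entry. A minimal demonstration with made-up values:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool overlaps(uint64_t start, uint64_t len,
                         uint64_t p_start, uint64_t p_len)
    {
        /* Same predicate as the QLIST_FOREACH check above */
        return (start < p_start + p_len) && (start + len > p_start);
    }

    int main(void)
    {
        /* New range [0x0, 0x4000) fully contains existing [0x1000, 0x2000);
         * the old check missed this case, the new one flags it. */
        assert(overlaps(0x0, 0x4000, 0x1000, 0x1000));
        /* Adjacent ranges do not overlap */
        assert(!overlaps(0x0, 0x1000, 0x1000, 0x1000));
        return 0;
    }
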
@@ -1179,8 +1530,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path,
stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
pcie_aer_inject_error(PCI_DEVICE(obj), &err);
-
- return;
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
@@ -1268,7 +1617,6 @@ static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
return CXL_EVENT_TYPE_FAIL;
case CXL_EVENT_LOG_FATAL:
return CXL_EVENT_TYPE_FATAL;
-/* DCD not yet supported */
default:
return -EINVAL;
}
@@ -1457,7 +1805,6 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
cxl_event_irq_assert(ct3d);
}
- return;
}
void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
@@ -1519,7 +1866,302 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
}
}
-static void ct3_class_init(ObjectClass *oc, void *data)
+/* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */
+static const QemuUUID dynamic_capacity_uuid = {
+ .data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f,
+ 0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a),
+};
+
+typedef enum CXLDCEventType {
+ DC_EVENT_ADD_CAPACITY = 0x0,
+ DC_EVENT_RELEASE_CAPACITY = 0x1,
+ DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2,
+ DC_EVENT_REGION_CONFIG_UPDATED = 0x3,
+ DC_EVENT_ADD_CAPACITY_RSP = 0x4,
+ DC_EVENT_CAPACITY_RELEASED = 0x5,
+} CXLDCEventType;
+
+/*
+ * Check whether the range [dpa, dpa + len - 1] overlaps with any extent in
+ * the list.
+ * Return value: true if there is an overlap; otherwise false
+ */
+static bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
+ uint64_t dpa, uint64_t len)
+{
+ CXLDCExtent *ent;
+ Range range1, range2;
+
+ if (!list) {
+ return false;
+ }
+
+ range_init_nofail(&range1, dpa, len);
+ QTAILQ_FOREACH(ent, list, node) {
+ range_init_nofail(&range2, ent->start_dpa, ent->len);
+ if (range_overlaps_range(&range1, &range2)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Check whether the range [dpa, dpa + len - 1] is contained by extents in
+ * the list.
+ * Containment across multiple extents will be checked once superset release
+ * support is added.
+ * Return value: true if the range is contained; otherwise false
+ */
+bool cxl_extents_contains_dpa_range(CXLDCExtentList *list,
+ uint64_t dpa, uint64_t len)
+{
+ CXLDCExtent *ent;
+ Range range1, range2;
+
+ if (!list) {
+ return false;
+ }
+
+ range_init_nofail(&range1, dpa, len);
+ QTAILQ_FOREACH(ent, list, node) {
+ range_init_nofail(&range2, ent->start_dpa, ent->len);
+ if (range_contains_range(&range2, &range1)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
+ uint64_t dpa, uint64_t len)
+{
+ CXLDCExtentGroup *group;
+
+ if (!list) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(group, list, node) {
+ if (cxl_extents_overlaps_dpa_range(&group->list, dpa, len)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * The main function to process dynamic capacity events with an extent list.
+ * Currently DC extent add/release requests are processed.
+ */
+static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
+ uint16_t hid, CXLDCEventType type, uint8_t rid,
+ CxlDynamicCapacityExtentList *records, Error **errp)
+{
+ Object *obj;
+ CXLEventDynamicCapacity dCap = {};
+ CXLEventRecordHdr *hdr = &dCap.hdr;
+ CXLType3Dev *dcd;
+ uint8_t flags = 1 << CXL_EVENT_TYPE_INFO;
+ uint32_t num_extents = 0;
+ CxlDynamicCapacityExtentList *list;
+ CXLDCExtentGroup *group = NULL;
+ g_autofree CXLDCExtentRaw *extents = NULL;
+ uint8_t enc_log = CXL_EVENT_TYPE_DYNAMIC_CAP;
+ uint64_t dpa, offset, len, block_size;
+ g_autofree unsigned long *blk_bitmap = NULL;
+ int i;
+
+ obj = object_resolve_path_type(path, TYPE_CXL_TYPE3, NULL);
+ if (!obj) {
+ error_setg(errp, "Unable to resolve CXL type 3 device");
+ return;
+ }
+
+ dcd = CXL_TYPE3(obj);
+ if (!dcd->dc.num_regions) {
+ error_setg(errp, "No dynamic capacity support from the device");
+ return;
+ }
+
+ if (rid >= dcd->dc.num_regions) {
+ error_setg(errp, "region id is too large");
+ return;
+ }
+ block_size = dcd->dc.regions[rid].block_size;
+ blk_bitmap = bitmap_new(dcd->dc.regions[rid].len / block_size);
+
+ /* Sanity check and count the extents */
+ list = records;
+ while (list) {
+ offset = list->value->offset;
+ len = list->value->len;
+ dpa = offset + dcd->dc.regions[rid].base;
+
+ if (len == 0) {
+ error_setg(errp, "extent with 0 length is not allowed");
+ return;
+ }
+
+ if (offset % block_size || len % block_size) {
+ error_setg(errp, "dpa or len is not aligned to region block size");
+ return;
+ }
+
+ if (offset + len > dcd->dc.regions[rid].len) {
+ error_setg(errp, "extent range is beyond the region end");
+ return;
+ }
+
+        /* No duplicate or overlapping extents are allowed */
+        if (test_any_bits_set(blk_bitmap, offset / block_size,
+                              len / block_size)) {
+            error_setg(errp, "duplicate or overlapping extents detected");
+ return;
+ }
+ bitmap_set(blk_bitmap, offset / block_size, len / block_size);
+
+ if (type == DC_EVENT_RELEASE_CAPACITY) {
+ if (cxl_extent_groups_overlaps_dpa_range(&dcd->dc.extents_pending,
+ dpa, len)) {
+ error_setg(errp,
+ "cannot release extent with pending DPA range");
+ return;
+ }
+ if (!ct3_test_region_block_backed(dcd, dpa, len)) {
+ error_setg(errp,
+ "cannot release extent with non-existing DPA range");
+ return;
+ }
+ } else if (type == DC_EVENT_ADD_CAPACITY) {
+ if (cxl_extents_overlaps_dpa_range(&dcd->dc.extents, dpa, len)) {
+ error_setg(errp,
+ "cannot add DPA already accessible to the same LD");
+ return;
+ }
+ if (cxl_extent_groups_overlaps_dpa_range(&dcd->dc.extents_pending,
+ dpa, len)) {
+ error_setg(errp,
+ "cannot add DPA again while still pending");
+ return;
+ }
+ }
+ list = list->next;
+ num_extents++;
+ }
+
+ /* Create extent list for event being passed to host */
+ i = 0;
+ list = records;
+ extents = g_new0(CXLDCExtentRaw, num_extents);
+ while (list) {
+ offset = list->value->offset;
+ len = list->value->len;
+ dpa = dcd->dc.regions[rid].base + offset;
+
+ extents[i].start_dpa = dpa;
+ extents[i].len = len;
+ memset(extents[i].tag, 0, 0x10);
+ extents[i].shared_seq = 0;
+ if (type == DC_EVENT_ADD_CAPACITY) {
+ group = cxl_insert_extent_to_extent_group(group,
+ extents[i].start_dpa,
+ extents[i].len,
+ extents[i].tag,
+ extents[i].shared_seq);
+ }
+
+ list = list->next;
+ i++;
+ }
+ if (group) {
+ cxl_extent_group_list_insert_tail(&dcd->dc.extents_pending, group);
+ }
+
+ /*
+ * CXL r3.1 section 8.2.9.2.1.6: Dynamic Capacity Event Record
+ *
+ * All Dynamic Capacity event records shall set the Event Record Severity
+ * field in the Common Event Record Format to Informational Event. All
+ * Dynamic Capacity related events shall be logged in the Dynamic Capacity
+ * Event Log.
+ */
+ cxl_assign_event_header(hdr, &dynamic_capacity_uuid, flags, sizeof(dCap),
+ cxl_device_get_timestamp(&dcd->cxl_dstate));
+
+ dCap.type = type;
+ /* FIXME: for now, validity flag is cleared */
+ dCap.validity_flags = 0;
+ stw_le_p(&dCap.host_id, hid);
+    /* Only valid for the DC_EVENT_REGION_CONFIG_UPDATED event */
+ dCap.updated_region_id = 0;
+ for (i = 0; i < num_extents; i++) {
+ memcpy(&dCap.dynamic_capacity_extent, &extents[i],
+ sizeof(CXLDCExtentRaw));
+
+ dCap.flags = 0;
+ if (i < num_extents - 1) {
+ /* Set "More" flag */
+ dCap.flags |= BIT(0);
+ }
+
+ if (cxl_event_insert(&dcd->cxl_dstate, enc_log,
+ (CXLEventRecordRaw *)&dCap)) {
+ cxl_event_irq_assert(dcd);
+ }
+ }
+}
+
+void qmp_cxl_add_dynamic_capacity(const char *path, uint16_t host_id,
+ CxlExtentSelectionPolicy sel_policy,
+ uint8_t region, const char *tag,
+ CxlDynamicCapacityExtentList *extents,
+ Error **errp)
+{
+ switch (sel_policy) {
+ case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE:
+ qmp_cxl_process_dynamic_capacity_prescriptive(path, host_id,
+ DC_EVENT_ADD_CAPACITY,
+ region, extents, errp);
+ return;
+ default:
+ error_setg(errp, "Selection policy not supported");
+ return;
+ }
+}
+
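
A usage sketch for the entry point above (hedged: the command and argument
spellings are inferred from this function's signature and QAPI naming
conventions, the device path is a placeholder, and the 128 MiB extent length
is chosen as a multiple of the 2 MiB region block size):

    { "execute": "cxl-add-dynamic-capacity",
      "arguments": {
        "path": "/machine/peripheral/cxl-dcd0",
        "host-id": 0,
        "selection-policy": "prescriptive",
        "region": 0,
        "extents": [ { "offset": 0, "len": 134217728 } ]
      }
    }
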
+void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id,
+ CxlExtentRemovalPolicy removal_policy,
+ bool has_forced_removal,
+ bool forced_removal,
+ bool has_sanitize_on_release,
+ bool sanitize_on_release,
+ uint8_t region,
+ const char *tag,
+ CxlDynamicCapacityExtentList *extents,
+ Error **errp)
+{
+ CXLDCEventType type = DC_EVENT_RELEASE_CAPACITY;
+
+ if (has_forced_removal && forced_removal) {
+ /* TODO: enable forced removal in the future */
+ type = DC_EVENT_FORCED_RELEASE_CAPACITY;
+ error_setg(errp, "Forced removal not supported yet");
+ return;
+ }
+
+ switch (removal_policy) {
+ case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE:
+ qmp_cxl_process_dynamic_capacity_prescriptive(path, host_id, type,
+ region, extents, errp);
+ return;
+ default:
+ error_setg(errp, "Removal policy not supported");
+ return;
+ }
+}
+
+static void ct3_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -1537,7 +2179,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "CXL Memory Device (Type 3)";
- dc->reset = ct3d_reset;
+ device_class_set_legacy_reset(dc, ct3d_reset);
device_class_set_props(dc, ct3_props);
cvc->get_lsa_size = get_lsa_size;
@@ -1552,7 +2194,7 @@ static const TypeInfo ct3d_info = {
.class_size = sizeof(struct CXLType3Class),
.class_init = ct3_class_init,
.instance_size = sizeof(CXLType3Dev),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CXL_DEVICE },
{ INTERFACE_PCIE_DEVICE },
{}
diff --git a/hw/mem/cxl_type3_stubs.c b/hw/mem/cxl_type3_stubs.c
index 3e1851e..c1a5e4a 100644
--- a/hw/mem/cxl_type3_stubs.c
+++ b/hw/mem/cxl_type3_stubs.c
@@ -67,3 +67,28 @@ void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
{
error_setg(errp, "CXL Type 3 support is not compiled in");
}
+
+void qmp_cxl_add_dynamic_capacity(const char *path,
+ uint16_t host_id,
+ CxlExtentSelectionPolicy sel_policy,
+ uint8_t region,
+ const char *tag,
+ CxlDynamicCapacityExtentList *extents,
+ Error **errp)
+{
+ error_setg(errp, "CXL Type 3 support is not compiled in");
+}
+
+void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id,
+ CxlExtentRemovalPolicy removal_policy,
+ bool has_forced_removal,
+ bool forced_removal,
+ bool has_sanitize_on_release,
+ bool sanitize_on_release,
+ uint8_t region,
+ const char *tag,
+ CxlDynamicCapacityExtentList *extents,
+ Error **errp)
+{
+ error_setg(errp, "CXL Type 3 support is not compiled in");
+}
diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c
index a5f279a..1a432e9 100644
--- a/hw/mem/memory-device.c
+++ b/hw/mem/memory-device.c
@@ -16,8 +16,8 @@
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
-#include "sysemu/kvm.h"
-#include "exec/address-spaces.h"
+#include "system/kvm.h"
+#include "system/address-spaces.h"
#include "trace.h"
static bool memory_device_is_empty(const MemoryDeviceState *md)
diff --git a/hw/mem/npcm7xx_mc.c b/hw/mem/npcm7xx_mc.c
index abc5af5..07fc108 100644
--- a/hw/mem/npcm7xx_mc.c
+++ b/hw/mem/npcm7xx_mc.c
@@ -65,7 +65,7 @@ static void npcm7xx_mc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
}
-static void npcm7xx_mc_class_init(ObjectClass *klass, void *data)
+static void npcm7xx_mc_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 1631a7d..23ab143e 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -30,7 +30,7 @@
#include "hw/mem/nvdimm.h"
#include "hw/qdev-properties.h"
#include "hw/mem/memory-device.h"
-#include "sysemu/hostmem.h"
+#include "system/hostmem.h"
static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
@@ -246,12 +246,11 @@ static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf,
memory_region_set_dirty(mr, backend_offset, size);
}
-static Property nvdimm_properties[] = {
+static const Property nvdimm_properties[] = {
DEFINE_PROP_BOOL(NVDIMM_UNARMED_PROP, NVDIMMDevice, unarmed, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void nvdimm_class_init(ObjectClass *oc, void *data)
+static void nvdimm_class_init(ObjectClass *oc, const void *data)
{
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 27919ca..f701d5b 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -28,8 +28,8 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/module.h"
-#include "sysemu/hostmem.h"
-#include "sysemu/numa.h"
+#include "system/hostmem.h"
+#include "system/numa.h"
#include "trace.h"
static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp);
@@ -150,14 +150,13 @@ out:
return slot;
}
-static Property pc_dimm_properties[] = {
+static const Property pc_dimm_properties[] = {
DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0),
DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0),
DEFINE_PROP_INT32(PC_DIMM_SLOT_PROP, PCDIMMDevice, slot,
PC_DIMM_UNASSIGNED_SLOT),
DEFINE_PROP_LINK(PC_DIMM_MEMDEV_PROP, PCDIMMDevice, hostmem,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
- DEFINE_PROP_END_OF_LIST(),
};
static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name,
@@ -277,7 +276,7 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md,
}
}
-static void pc_dimm_class_init(ObjectClass *oc, void *data)
+static void pc_dimm_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
@@ -302,7 +301,7 @@ static const TypeInfo pc_dimm_info = {
.instance_init = pc_dimm_init,
.class_init = pc_dimm_class_init,
.class_size = sizeof(PCDIMMDeviceClass),
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_MEMORY_DEVICE },
{ }
},
diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c
index 6e8f4f8..d7b00e5 100644
--- a/hw/mem/sparse-mem.c
+++ b/hw/mem/sparse-mem.c
@@ -17,7 +17,7 @@
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/units.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
#include "hw/mem/sparse-mem.h"
#define SPARSE_MEM(obj) OBJECT_CHECK(SparseMemState, (obj), TYPE_SPARSE_MEM)
@@ -82,7 +82,6 @@ static void sparse_mem_enter_reset(Object *obj, ResetType type)
{
SparseMemState *s = SPARSE_MEM(obj);
g_hash_table_remove_all(s->mapped);
- return;
}
static const MemoryRegionOps sparse_mem_ops = {
@@ -96,14 +95,13 @@ static const MemoryRegionOps sparse_mem_ops = {
},
};
-static Property sparse_mem_properties[] = {
+static const Property sparse_mem_properties[] = {
/* The base address of the memory */
DEFINE_PROP_UINT64("baseaddr", SparseMemState, baseaddr, 0x0),
/* The length of the sparse memory region */
DEFINE_PROP_UINT64("length", SparseMemState, length, UINT64_MAX),
/* Max amount of actual memory that can be used to back the sparse memory */
DEFINE_PROP_UINT64("maxsize", SparseMemState, maxsize, 10 * MiB),
- DEFINE_PROP_END_OF_LIST(),
};
MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length)
@@ -138,7 +136,7 @@ static void sparse_mem_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->mmio);
}
-static void sparse_mem_class_init(ObjectClass *klass, void *data)
+static void sparse_mem_class_init(ObjectClass *klass, const void *data)
{
ResettableClass *rc = RESETTABLE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);