aboutsummaryrefslogtreecommitdiff
path: root/hw
diff options
context:
space:
mode:
authorStefan Hajnoczi <stefanha@redhat.com>2024-01-02 10:35:25 -0500
committerStefan Hajnoczi <stefanha@redhat.com>2024-01-08 10:45:43 -0500
commit195801d700c008b6a8d8acfa299aa5f177446647 (patch)
tree7ab423e4a773b818f6c6d65f2fa06dc4517cad24 /hw
parent897a06c6d7ce8fb962a33cea1910d17218c746e9 (diff)
downloadqemu-195801d700c008b6a8d8acfa299aa5f177446647.zip
qemu-195801d700c008b6a8d8acfa299aa5f177446647.tar.gz
qemu-195801d700c008b6a8d8acfa299aa5f177446647.tar.bz2
system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
The Big QEMU Lock (BQL) has many names and they are confusing. The actual QemuMutex variable is called qemu_global_mutex but it's commonly referred to as the BQL in discussions and some code comments. The locking APIs, however, are called qemu_mutex_lock_iothread() and qemu_mutex_unlock_iothread(). The "iothread" name is historic and comes from when the main thread was split into KVM vcpu threads and the "iothread" (now called the main loop thread). I have contributed to the confusion myself by introducing a separate --object iothread, a separate concept unrelated to the BQL. The "iothread" name is no longer appropriate for the BQL. Rename the locking APIs to: - void bql_lock(void) - void bql_unlock(void) - bool bql_locked(void) There are more APIs with "iothread" in their names. Subsequent patches will rename them. There are also comments and documentation that will be updated in later patches. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Reviewed-by: Paul Durrant <paul@xen.org> Acked-by: Fabiano Rosas <farosas@suse.de> Acked-by: David Woodhouse <dwmw@amazon.co.uk> Reviewed-by: Cédric Le Goater <clg@kaod.org> Acked-by: Peter Xu <peterx@redhat.com> Acked-by: Eric Farman <farman@linux.ibm.com> Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com> Acked-by: Hyman Huang <yong.huang@smartx.com> Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com> Message-id: 20240102153529.486531-2-stefanha@redhat.com Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'hw')
-rw-r--r--hw/core/cpu-common.c6
-rw-r--r--hw/i386/intel_iommu.c6
-rw-r--r--hw/i386/kvm/xen_evtchn.c16
-rw-r--r--hw/i386/kvm/xen_overlay.c2
-rw-r--r--hw/i386/kvm/xen_xenstore.c2
-rw-r--r--hw/intc/arm_gicv3_cpuif.c2
-rw-r--r--hw/intc/s390_flic.c18
-rw-r--r--hw/misc/edu.c4
-rw-r--r--hw/misc/imx6_src.c2
-rw-r--r--hw/misc/imx7_src.c2
-rw-r--r--hw/net/xen_nic.c8
-rw-r--r--hw/ppc/pegasos2.c2
-rw-r--r--hw/ppc/ppc.c4
-rw-r--r--hw/ppc/spapr.c2
-rw-r--r--hw/ppc/spapr_rng.c4
-rw-r--r--hw/ppc/spapr_softmmu.c4
-rw-r--r--hw/remote/mpqemu-link.c20
-rw-r--r--hw/remote/vfio-user-obj.c2
-rw-r--r--hw/s390x/s390-skeys.c2
19 files changed, 54 insertions, 54 deletions
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index d0e7bbd..3ccfe88 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename)
* BQL here if we need to. cpu_interrupt assumes it is held.*/
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
- bool need_lock = !qemu_mutex_iothread_locked();
+ bool need_lock = !bql_locked();
if (need_lock) {
- qemu_mutex_lock_iothread();
+ bql_lock();
}
cpu->interrupt_request &= ~mask;
if (need_lock) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index ed5677c..1a07fad 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
{
bool use_iommu, pt;
/* Whether we need to take the BQL on our own */
- bool take_bql = !qemu_mutex_iothread_locked();
+ bool take_bql = !bql_locked();
assert(as);
@@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
* it. We'd better make sure we have had it already, or, take it.
*/
if (take_bql) {
- qemu_mutex_lock_iothread();
+ bql_lock();
}
/* Turn off first then on the other */
@@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
}
if (take_bql) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
return use_iommu;
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 9a5f3ca..4a835a1 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
* effect immediately. That just leaves interdomain loopback as the case
* which uses the BH.
*/
- if (!qemu_mutex_iothread_locked()) {
+ if (!bql_locked()) {
qemu_bh_schedule(s->gsi_bh);
return;
}
@@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
* We need the BQL because set_callback_pci_intx() may call into PCI code,
* and because we may need to manipulate the old and new GSI levels.
*/
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
qemu_mutex_lock(&s->port_lock);
switch (type) {
@@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
XenEvtchnPort *p = &s->port_table[port];
/* Because it *might* be a PIRQ port */
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
switch (p->type) {
case EVTCHNSTAT_closed:
@@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
return -ENOTSUP;
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
qemu_mutex_lock(&s->port_lock);
@@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
XenEvtchnState *s = xen_evtchn_singleton;
int pirq;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
return false;
@@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
return;
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
pirq = msi_pirq_target(addr, data);
@@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
return 1; /* Not a PIRQ */
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
pirq = msi_pirq_target(address, data);
if (!pirq || pirq >= s->nr_pirqs) {
@@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
return false;
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
pirq = msi_pirq_target(address, data);
if (!pirq || pirq >= s->nr_pirqs) {
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
index 526f7a6..c68e78a 100644
--- a/hw/i386/kvm/xen_overlay.c
+++ b/hw/i386/kvm/xen_overlay.c
@@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
return -ENOENT;
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
if (s->shinfo_gpa) {
/* If removing shinfo page, turn the kernel magic off first */
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index c3633f7..1a9bc34 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
{
XenXenstoreState *s = opaque;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
/*
* If there's a response pending, we obviously can't scribble over
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index ab1a005..77c2a6d 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs->cpu);
CPUARMState *env = &cpu->env;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
cs->hppi.grp, cs->hppi.prio);
diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c
index 212f268..f4a8484 100644
--- a/hw/intc/s390_flic.c
+++ b/hw/intc/s390_flic.c
@@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
QEMUS390FlicIO *cur, *next;
uint8_t isc;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
if (!(flic->pending & FLIC_PENDING_IO)) {
return 0;
}
@@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
{
uint32_t tmp;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
g_assert(flic->pending & FLIC_PENDING_SERVICE);
tmp = flic->service_param;
flic->service_param = 0;
@@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
QEMUS390FlicIO *io;
uint8_t isc;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
return NULL;
}
@@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
{
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
flic->pending &= ~FLIC_PENDING_MCHK_CR;
}
@@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
/* multiplexing is good enough for sclp - kvm does it internally as well */
flic->service_param |= parm;
flic->pending |= FLIC_PENDING_SERVICE;
@@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
QEMUS390FlicIO *io;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
io = g_new0(QEMUS390FlicIO, 1);
io->id = subchannel_id;
io->nr = subchannel_nr;
@@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
flic->pending |= FLIC_PENDING_MCHK_CR;
qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
@@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
{
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
return !!flic->pending;
}
@@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev)
QEMUS390FlicIO *cur, *next;
int isc;
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
flic->simm = 0;
flic->nimm = 0;
flic->pending = 0;
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
index e64a246..2a976ca 100644
--- a/hw/misc/edu.c
+++ b/hw/misc/edu.c
@@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque)
smp_mb__after_rmw();
if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
- qemu_mutex_lock_iothread();
+ bql_lock();
edu_raise_irq(edu, FACT_IRQ);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
index d20727e..0c60035 100644
--- a/hw/misc/imx6_src.c
+++ b/hw/misc/imx6_src.c
@@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
struct SRCSCRResetInfo *ri = data.host_ptr;
IMX6SRCState *s = ri->s;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0);
DPRINTF("reg[%s] <= 0x%" PRIx32 "\n",
diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c
index 24a0b46..b3725ff 100644
--- a/hw/misc/imx7_src.c
+++ b/hw/misc/imx7_src.c
@@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data)
struct SRCSCRResetInfo *ri = data.host_ptr;
IMX7SRCState *s = ri->s;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0);
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 1e2b3bae..453fdb9 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev)
void *page;
void *tmpbuf = NULL;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
for (;;) {
rc = netdev->tx_ring.req_cons;
@@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
RING_IDX rc, rp;
void *page;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) {
return -1;
@@ -354,7 +354,7 @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp)
XenNetDev *netdev = XEN_NET_DEVICE(xendev);
unsigned int port, rx_copy;
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u",
&netdev->tx_ring_ref) != 1) {
@@ -425,7 +425,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp)
trace_xen_netdev_disconnect(netdev->dev);
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
netdev->tx_ring.sring = NULL;
netdev->rx_ring.sring = NULL;
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index 3203a4a..d84f3f9 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -515,7 +515,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
CPUPPCState *env = &cpu->env;
/* The TCG path should also be holding the BQL at this point */
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
if (FIELD_EX64(env->msr, MSR, PR)) {
qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n");
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index c532d79..da1626f 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
PowerPCCPU *cpu = env_archcpu(env);
- qemu_mutex_lock_iothread();
+ bql_lock();
switch ((val >> 28) & 0x3) {
case 0x0:
@@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
break;
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
/* PowerPC 40x internal IRQ controller */
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 4997aa4..e8dabc8 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1304,7 +1304,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
CPUPPCState *env = &cpu->env;
/* The TCG path should also be holding the BQL at this point */
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
g_assert(!vhyp_cpu_in_nested(cpu));
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
index df5c4b9..c2fda7a 100644
--- a/hw/ppc/spapr_rng.c
+++ b/hw/ppc/spapr_rng.c
@@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr,
while (hrdata.received < 8) {
rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
random_recv, &hrdata);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
qemu_sem_wait(&hrdata.sem);
- qemu_mutex_lock_iothread();
+ bql_lock();
}
qemu_sem_destroy(&hrdata.sem);
diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c
index 2786663..fc1bbc0 100644
--- a/hw/ppc/spapr_softmmu.c
+++ b/hw/ppc/spapr_softmmu.c
@@ -334,7 +334,7 @@ static void *hpt_prepare_thread(void *opaque)
pending->ret = H_NO_MEM;
}
- qemu_mutex_lock_iothread();
+ bql_lock();
if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
/* Ready to go */
@@ -344,7 +344,7 @@ static void *hpt_prepare_thread(void *opaque)
free_pending_hpt(pending);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return NULL;
}
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
index 9bd98e8..d04ac93 100644
--- a/hw/remote/mpqemu-link.c
+++ b/hw/remote/mpqemu-link.c
@@ -33,7 +33,7 @@
*/
bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
{
- bool iolock = qemu_mutex_iothread_locked();
+ bool drop_bql = bql_locked();
bool iothread = qemu_in_iothread();
struct iovec send[2] = {};
int *fds = NULL;
@@ -63,8 +63,8 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
* for IOThread case.
* Also skip lock handling while in a co-routine in the main context.
*/
- if (iolock && !iothread && !qemu_in_coroutine()) {
- qemu_mutex_unlock_iothread();
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
+ bql_unlock();
}
if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
@@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
}
- if (iolock && !iothread && !qemu_in_coroutine()) {
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
/* See above comment why skip locking here. */
- qemu_mutex_lock_iothread();
+ bql_lock();
}
return ret;
@@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
size_t *nfds, Error **errp)
{
struct iovec iov = { .iov_base = buf, .iov_len = len };
- bool iolock = qemu_mutex_iothread_locked();
+ bool drop_bql = bql_locked();
bool iothread = qemu_in_iothread();
int ret = -1;
@@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
*/
assert(qemu_in_coroutine() || !iothread);
- if (iolock && !iothread && !qemu_in_coroutine()) {
- qemu_mutex_unlock_iothread();
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
+ bql_unlock();
}
ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
- if (iolock && !iothread && !qemu_in_coroutine()) {
- qemu_mutex_lock_iothread();
+ if (drop_bql && !iothread && !qemu_in_coroutine()) {
+ bql_lock();
}
return (ret <= 0) ? ret : iov.iov_len;
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
index 8b10c32..d9b879e 100644
--- a/hw/remote/vfio-user-obj.c
+++ b/hw/remote/vfio-user-obj.c
@@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
}
if (release_lock) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
release_lock = false;
}
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c
index 8f5159d..5c535d4 100644
--- a/hw/s390x/s390-skeys.c
+++ b/hw/s390x/s390-skeys.c
@@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp)
goto out;
}
- assert(qemu_mutex_iothread_locked());
+ assert(bql_locked());
guest_phys_blocks_init(&guest_phys_blocks);
guest_phys_blocks_append(&guest_phys_blocks);