 docs/rdma.txt    | 6 +++---
 migration-rdma.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/rdma.txt b/docs/rdma.txt
index 1f5d9e9..2bdd0a5 100644
--- a/docs/rdma.txt
+++ b/docs/rdma.txt
@@ -18,7 +18,7 @@ Contents:
 * RDMA Migration Protocol Description
 * Versioning and Capabilities
 * QEMUFileRDMA Interface
-* Migration of pc.ram
+* Migration of VM's ram
 * Error handling
 * TODO
 
@@ -149,7 +149,7 @@ The only difference between a SEND message and an RDMA
 message is that SEND messages cause notifications
 to be posted to the completion queue (CQ) on the
 infiniband receiver side, whereas RDMA messages (used
-for pc.ram) do not (to behave like an actual DMA).
+for VM's ram) do not (to behave like an actual DMA).
 
 Messages in infiniband require two things:
 
@@ -355,7 +355,7 @@ If the buffer is empty, then we follow the same steps
 listed above and issue another "QEMU File" protocol command,
 asking for a new SEND message to re-fill the buffer.
 
-Migration of pc.ram:
+Migration of VM's ram:
 ====================
 
 At the beginning of the migration, (migration-rdma.c),
diff --git a/migration-rdma.c b/migration-rdma.c
index d99812c..b32dbdf 100644
--- a/migration-rdma.c
+++ b/migration-rdma.c
@@ -2523,7 +2523,7 @@ static void *qemu_rdma_data_init(const char *host_port, Error **errp)
 /*
  * QEMUFile interface to the control channel.
  * SEND messages for control only.
- * pc.ram is handled with regular RDMA messages.
+ * VM's ram is handled with regular RDMA messages.
  */
 static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
                                 int64_t pos, int size)
@@ -2539,7 +2539,7 @@
 
     /*
      * Push out any writes that
-     * we're queued up for pc.ram.
+     * we're queued up for VM's ram.
      */
     ret = qemu_rdma_write_flush(f, rdma);
     if (ret < 0) {
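
The split the patched comments describe -- SEND for control, RDMA writes
for VM's ram -- maps directly onto the verbs API. Below is a minimal
libibverbs sketch (not QEMU's actual code) of the two kinds of work
request; the qp/mr/remote_addr/rkey arguments and the helper names are
hypothetical stand-ins for the usual connection and registration setup.

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: push one chunk of VM's ram with an RDMA write.
 * IBV_WR_RDMA_WRITE places data directly into remote memory and posts
 * no completion on the receiver's CQ, behaving like an actual DMA. */
static int post_ram_chunk(struct ibv_qp *qp, struct ibv_mr *mr,
                          void *chunk, uint32_t len,
                          uint64_t remote_addr, uint32_t rkey)
{
    struct ibv_sge sge = {
        .addr   = (uintptr_t)chunk,
        .length = len,
        .lkey   = mr->lkey,
    };
    struct ibv_send_wr wr, *bad_wr;

    memset(&wr, 0, sizeof(wr));
    wr.opcode              = IBV_WR_RDMA_WRITE; /* silent on remote side */
    wr.sg_list             = &sge;
    wr.num_sge             = 1;
    wr.wr.rdma.remote_addr = remote_addr;
    wr.wr.rdma.rkey        = rkey;
    /* left unsignaled: sender-side completions can be batched, which is
     * why queued writes must be flushed before a control message */
    return ibv_post_send(qp, &wr, &bad_wr);
}

/* Hypothetical helper: push one control message with a SEND.
 * IBV_WR_SEND consumes a posted receive on the peer and generates an
 * entry on the receiver's CQ, so the peer is notified. */
static int post_control_send(struct ibv_qp *qp, struct ibv_mr *mr,
                             void *msg, uint32_t len)
{
    struct ibv_sge sge = {
        .addr   = (uintptr_t)msg,
        .length = len,
        .lkey   = mr->lkey,
    };
    struct ibv_send_wr wr, *bad_wr;

    memset(&wr, 0, sizeof(wr));
    wr.opcode     = IBV_WR_SEND;       /* receiver sees a CQ entry */
    wr.send_flags = IBV_SEND_SIGNALED; /* sender wants one as well */
    wr.sg_list    = &sge;
    wr.num_sge    = 1;
    return ibv_post_send(qp, &wr, &bad_wr);
}

On a reliable-connection QP, work requests are executed in order, which
is the property qemu_rdma_put_buffer() relies on: it flushes the queued
RDMA writes for VM's ram first and only then posts the control SEND, so
by the time the receiver is notified of the SEND, the preceding RAM
writes have already landed in remote memory.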