author     aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-10-06 20:21:51 +0000
committer  aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-10-06 20:21:51 +0000
commit     475e4277721eca10e98bd926fa1d3ebc74d0ac35 (patch)
tree       19382473b062c841833810eb6ecbdcd59303f2f0 /vl.c
parent     e18231a3ffe12a1f2004964594c7dd8ba3f78f33 (diff)
Switch the memory savevm handler to be "live"
This patch replaces the static memory savevm/loadvm handler with a "live" one. This handler is used even when performing a non-live migration.

The key difference between this handler and the previous one is that each page is prefixed with the address of the page. The QEMUFile rate limiting code, in combination with the live migration dirty tracking bits, is used to determine which pages should be sent and how many.

The live save code "converges" when the number of remaining dirty pages falls below a fixed threshold, currently 10 pages. This threshold should eventually be derived from the bandwidth limitation.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5437 c046a42c-6fe2-441c-8c8c-71466251a162
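For readers unfamiliar with the stream layout this patch introduces, below is a minimal, self-contained sketch (not part of the patch) of the version 3 page records that ram_save_live() emits and ram_load() consumes: each record is a big-endian 64-bit word carrying the page address in its page-aligned bits and RAM_SAVE_FLAG_* values in the low bits, followed by either a single fill byte (RAM_SAVE_FLAG_COMPRESS) or a full page (RAM_SAVE_FLAG_PAGE), with RAM_SAVE_FLAG_EOS terminating the section. The in-memory stream buffer, the put_be64()/get_be64() helpers, and the fixed 4 KiB TARGET_PAGE_SIZE are stand-ins for QEMU's QEMUFile API and target configuration; the FULL and MEM_SIZE flags are not exercised here.

/* Standalone sketch of the page record format introduced by this patch.
 * Not QEMU code: the stream[] buffer and the put_be64()/get_be64()
 * helpers stand in for QEMUFile. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TARGET_PAGE_SIZE       4096u
#define TARGET_PAGE_MASK       (~(uint64_t)(TARGET_PAGE_SIZE - 1))

#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10

static uint8_t stream[2 * TARGET_PAGE_SIZE];   /* stand-in for QEMUFile */
static size_t pos;

static void put_be64(uint64_t v)
{
    int i;
    for (i = 7; i >= 0; i--)
        stream[pos++] = (uint8_t)(v >> (i * 8));
}

static uint64_t get_be64(void)
{
    uint64_t v = 0;
    int i;
    for (i = 0; i < 8; i++)
        v = (v << 8) | stream[pos++];
    return v;
}

int main(void)
{
    static uint8_t page[TARGET_PAGE_SIZE];
    uint64_t addr;
    int flags;

    /* Writer side: a page at offset 0 whose bytes are all 0x00 is sent as
     * a single fill byte, mirroring the is_dup_page() fast path. */
    put_be64(0 | RAM_SAVE_FLAG_COMPRESS);
    stream[pos++] = 0x00;
    put_be64(RAM_SAVE_FLAG_EOS);

    /* Reader side: mirrors the version 3 loop in ram_load(). */
    pos = 0;
    do {
        addr = get_be64();
        flags = (int)(addr & ~TARGET_PAGE_MASK);
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            memset(page, stream[pos++], TARGET_PAGE_SIZE);
            printf("page 0x%08llx filled with 0x%02x\n",
                   (unsigned long long)addr, (unsigned)page[0]);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            memcpy(page, &stream[pos], TARGET_PAGE_SIZE);
            pos += TARGET_PAGE_SIZE;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

    return 0;
}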
Diffstat (limited to 'vl.c')
-rw-r--r--  vl.c  293
1 file changed, 152 insertions, 141 deletions
diff --git a/vl.c b/vl.c
index 5ec93c9..439d317 100644
--- a/vl.c
+++ b/vl.c
@@ -6685,7 +6685,7 @@ int qemu_savevm_state_begin(QEMUFile *f)
int qemu_savevm_state_iterate(QEMUFile *f)
{
SaveStateEntry *se;
- int ret = 0;
+ int ret = 1;
for (se = first_se; se != NULL; se = se->next) {
if (se->save_live_state == NULL)
@@ -6695,7 +6695,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
qemu_put_byte(f, QEMU_VM_SECTION_PART);
qemu_put_be32(f, se->section_id);
- ret |= se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
+ ret &= !!se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
}
if (ret)
@@ -6761,7 +6761,7 @@ int qemu_savevm_state(QEMUFile *f)
ret = qemu_savevm_state_iterate(f);
if (ret < 0)
goto out;
- } while (ret == 1);
+ } while (ret == 0);
ret = qemu_savevm_state_complete(f);
@@ -7254,77 +7254,6 @@ static int ram_load_v1(QEMUFile *f, void *opaque)
#define IOBUF_SIZE 4096
#define RAM_CBLOCK_MAGIC 0xfabe
-typedef struct RamCompressState {
- z_stream zstream;
- QEMUFile *f;
- uint8_t buf[IOBUF_SIZE];
-} RamCompressState;
-
-static int ram_compress_open(RamCompressState *s, QEMUFile *f)
-{
- int ret;
- memset(s, 0, sizeof(*s));
- s->f = f;
- ret = deflateInit2(&s->zstream, 1,
- Z_DEFLATED, 15,
- 9, Z_DEFAULT_STRATEGY);
- if (ret != Z_OK)
- return -1;
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- return 0;
-}
-
-static void ram_put_cblock(RamCompressState *s, const uint8_t *buf, int len)
-{
- qemu_put_be16(s->f, RAM_CBLOCK_MAGIC);
- qemu_put_be16(s->f, len);
- qemu_put_buffer(s->f, buf, len);
-}
-
-static int ram_compress_buf(RamCompressState *s, const uint8_t *buf, int len)
-{
- int ret;
-
- s->zstream.avail_in = len;
- s->zstream.next_in = (uint8_t *)buf;
- while (s->zstream.avail_in > 0) {
- ret = deflate(&s->zstream, Z_NO_FLUSH);
- if (ret != Z_OK)
- return -1;
- if (s->zstream.avail_out == 0) {
- ram_put_cblock(s, s->buf, IOBUF_SIZE);
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- }
- }
- return 0;
-}
-
-static void ram_compress_close(RamCompressState *s)
-{
- int len, ret;
-
- /* compress last bytes */
- for(;;) {
- ret = deflate(&s->zstream, Z_FINISH);
- if (ret == Z_OK || ret == Z_STREAM_END) {
- len = IOBUF_SIZE - s->zstream.avail_out;
- if (len > 0) {
- ram_put_cblock(s, s->buf, len);
- }
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- if (ret == Z_STREAM_END)
- break;
- } else {
- goto fail;
- }
- }
-fail:
- deflateEnd(&s->zstream);
-}
-
typedef struct RamDecompressState {
z_stream zstream;
QEMUFile *f;
@@ -7372,61 +7301,121 @@ static void ram_decompress_close(RamDecompressState *s)
inflateEnd(&s->zstream);
}
-static void ram_save(QEMUFile *f, void *opaque)
+#define RAM_SAVE_FLAG_FULL 0x01
+#define RAM_SAVE_FLAG_COMPRESS 0x02
+#define RAM_SAVE_FLAG_MEM_SIZE 0x04
+#define RAM_SAVE_FLAG_PAGE 0x08
+#define RAM_SAVE_FLAG_EOS 0x10
+
+static int is_dup_page(uint8_t *page, uint8_t ch)
{
- ram_addr_t i;
- RamCompressState s1, *s = &s1;
- uint8_t buf[10];
+ uint32_t val = ch << 24 | ch << 16 | ch << 8 | ch;
+ uint32_t *array = (uint32_t *)page;
+ int i;
- qemu_put_be32(f, phys_ram_size);
- if (ram_compress_open(s, f) < 0)
- return;
- for(i = 0; i < phys_ram_size; i+= BDRV_HASH_BLOCK_SIZE) {
-#if 0
- if (tight_savevm_enabled) {
- int64_t sector_num;
- int j;
-
- /* find if the memory block is available on a virtual
- block device */
- sector_num = -1;
- for(j = 0; j < nb_drives; j++) {
- sector_num = bdrv_hash_find(drives_table[j].bdrv,
- phys_ram_base + i,
- BDRV_HASH_BLOCK_SIZE);
- if (sector_num >= 0)
- break;
+ for (i = 0; i < (TARGET_PAGE_SIZE / 4); i++) {
+ if (array[i] != val)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ram_save_block(QEMUFile *f)
+{
+ static ram_addr_t current_addr = 0;
+ ram_addr_t saved_addr = current_addr;
+ ram_addr_t addr = 0;
+ int found = 0;
+
+ while (addr < phys_ram_size) {
+ if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
+ uint8_t ch;
+
+ cpu_physical_memory_reset_dirty(current_addr,
+ current_addr + TARGET_PAGE_SIZE,
+ MIGRATION_DIRTY_FLAG);
+
+ ch = *(phys_ram_base + current_addr);
+
+ if (is_dup_page(phys_ram_base + current_addr, ch)) {
+ qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
+ qemu_put_byte(f, ch);
+ } else {
+ qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
+ qemu_put_buffer(f, phys_ram_base + current_addr, TARGET_PAGE_SIZE);
}
- if (j == nb_drives)
- goto normal_compress;
- buf[0] = 1;
- buf[1] = j;
- cpu_to_be64wu((uint64_t *)(buf + 2), sector_num);
- ram_compress_buf(s, buf, 10);
- } else
-#endif
- {
- // normal_compress:
- buf[0] = 0;
- ram_compress_buf(s, buf, 1);
- ram_compress_buf(s, phys_ram_base + i, BDRV_HASH_BLOCK_SIZE);
+
+ found = 1;
+ break;
}
+ addr += TARGET_PAGE_SIZE;
+ current_addr = (saved_addr + addr) % phys_ram_size;
}
- ram_compress_close(s);
+
+ return found;
}
-static int ram_load(QEMUFile *f, void *opaque, int version_id)
+static ram_addr_t ram_save_threshold = 10;
+
+static ram_addr_t ram_save_remaining(void)
+{
+ ram_addr_t addr;
+ ram_addr_t count = 0;
+
+ for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) {
+ if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
+ count++;
+ }
+
+ return count;
+}
+
+static int ram_save_live(QEMUFile *f, int stage, void *opaque)
+{
+ ram_addr_t addr;
+
+ if (stage == 1) {
+ /* Make sure all dirty bits are set */
+ for (addr = 0; addr < phys_ram_size; addr += TARGET_PAGE_SIZE) {
+ if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
+ cpu_physical_memory_set_dirty(addr);
+ }
+
+ /* Enable dirty memory tracking */
+ cpu_physical_memory_set_dirty_tracking(1);
+
+ qemu_put_be64(f, phys_ram_size | RAM_SAVE_FLAG_MEM_SIZE);
+ }
+
+ while (!qemu_file_rate_limit(f)) {
+ int ret;
+
+ ret = ram_save_block(f);
+ if (ret == 0) /* no more blocks */
+ break;
+ }
+
+ /* try transferring iterative blocks of memory */
+
+ if (stage == 3) {
+ cpu_physical_memory_set_dirty_tracking(0);
+
+ /* flush all remaining blocks regardless of rate limiting */
+ while (ram_save_block(f) != 0);
+ }
+
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+ return (stage == 2) && (ram_save_remaining() < ram_save_threshold);
+}
+
+static int ram_load_dead(QEMUFile *f, void *opaque)
{
RamDecompressState s1, *s = &s1;
uint8_t buf[10];
ram_addr_t i;
- if (version_id == 1)
- return ram_load_v1(f, opaque);
- if (version_id != 2)
- return -EINVAL;
- if (qemu_get_be32(f) != phys_ram_size)
- return -EINVAL;
if (ram_decompress_open(s, f) < 0)
return -EINVAL;
for(i = 0; i < phys_ram_size; i+= BDRV_HASH_BLOCK_SIZE) {
@@ -7439,35 +7428,57 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
fprintf(stderr, "Error while reading ram block address=0x%08" PRIx64, (uint64_t)i);
goto error;
}
- } else
-#if 0
- if (buf[0] == 1) {
- int bs_index;
- int64_t sector_num;
-
- ram_decompress_buf(s, buf + 1, 9);
- bs_index = buf[1];
- sector_num = be64_to_cpupu((const uint64_t *)(buf + 2));
- if (bs_index >= nb_drives) {
- fprintf(stderr, "Invalid block device index %d\n", bs_index);
- goto error;
- }
- if (bdrv_read(drives_table[bs_index].bdrv, sector_num,
- phys_ram_base + i,
- BDRV_HASH_BLOCK_SIZE / 512) < 0) {
- fprintf(stderr, "Error while reading sector %d:%" PRId64 "\n",
- bs_index, sector_num);
- goto error;
- }
- } else
-#endif
- {
+ } else {
error:
printf("Error block header\n");
return -EINVAL;
}
}
ram_decompress_close(s);
+
+ return 0;
+}
+
+static int ram_load(QEMUFile *f, void *opaque, int version_id)
+{
+ ram_addr_t addr;
+ int flags;
+
+ if (version_id == 1)
+ return ram_load_v1(f, opaque);
+
+ if (version_id == 2) {
+ if (qemu_get_be32(f) != phys_ram_size)
+ return -EINVAL;
+ return ram_load_dead(f, opaque);
+ }
+
+ if (version_id != 3)
+ return -EINVAL;
+
+ do {
+ addr = qemu_get_be64(f);
+
+ flags = addr & ~TARGET_PAGE_MASK;
+ addr &= TARGET_PAGE_MASK;
+
+ if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
+ if (addr != phys_ram_size)
+ return -EINVAL;
+ }
+
+ if (flags & RAM_SAVE_FLAG_FULL) {
+ if (ram_load_dead(f, opaque) < 0)
+ return -EINVAL;
+ }
+
+ if (flags & RAM_SAVE_FLAG_COMPRESS) {
+ uint8_t ch = qemu_get_byte(f);
+ memset(phys_ram_base + addr, ch, TARGET_PAGE_SIZE);
+ } else if (flags & RAM_SAVE_FLAG_PAGE)
+ qemu_get_buffer(f, phys_ram_base + addr, TARGET_PAGE_SIZE);
+ } while (!(flags & RAM_SAVE_FLAG_EOS));
+
return 0;
}
@@ -9512,7 +9523,7 @@ int main(int argc, char **argv)
exit(1);
register_savevm("timer", 0, 2, timer_save, timer_load, NULL);
- register_savevm("ram", 0, 2, ram_save, ram_load, NULL);
+ register_savevm_live("ram", 0, 3, ram_save_live, NULL, ram_load, NULL);
/* terminal init */
memset(&display_state, 0, sizeof(display_state));