author    Liang Li <liang.z.li@intel.com>     2016-05-05 15:32:57 +0800
committer Amit Shah <amit.shah@redhat.com>    2016-06-17 18:24:26 +0530
commit    a7a9a88f9d29da125b0958f3bd1b15182dc94f5f (patch)
tree      16c42c157ee7b49747aa404f7ba6f897c1f33614 /migration
parent    90e56fb46d0a7add88ed463efa4e723a6238f692 (diff)
migration: refine the compression code
The current multi-thread compression code is not clear, especially in its use of locks. Refine the code to make it clear.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-8-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
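The gist of the patch is a tighter handoff protocol between the migration thread and each compression worker: the worker now holds its own mutex for the whole loop, sleeps on its condition variable until work is published in param->block, drops the mutex only around the slow compression call, and reports completion under the separate comp_done_lock. This removes the old "start" flag and the re-check-after-wakeup workaround. Below is a minimal, self-contained pthreads sketch of that pattern; it is not QEMU code — Param, worker_main, req, and the raw pthread calls stand in for CompressParam, do_data_compress(), param->block, and QEMU's qemu-thread wrappers.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mutex;   /* guards req and quit */
    pthread_cond_t  cond;    /* producer -> worker: new work or quit */
    const char     *req;     /* non-NULL means a work item is pending */
    bool            quit;
    bool            done;    /* guarded by done_lock below, not by mutex */
} Param;

static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_cond = PTHREAD_COND_INITIALIZER;

static Param p = {
    .mutex = PTHREAD_MUTEX_INITIALIZER,
    .cond  = PTHREAD_COND_INITIALIZER,
};

static void *worker_main(void *opaque)
{
    Param *w = opaque;

    pthread_mutex_lock(&w->mutex);
    while (!w->quit) {
        if (w->req) {
            const char *work = w->req;
            w->req = NULL;
            /* drop the handoff lock around the slow part */
            pthread_mutex_unlock(&w->mutex);

            printf("compressing %s\n", work); /* stands in for do_compress_ram_page() */

            pthread_mutex_lock(&done_lock);
            w->done = true;
            pthread_cond_signal(&done_cond);
            pthread_mutex_unlock(&done_lock);

            pthread_mutex_lock(&w->mutex);
        } else {
            /* atomically releases w->mutex while asleep, reacquires on wakeup */
            pthread_cond_wait(&w->cond, &w->mutex);
        }
    }
    pthread_mutex_unlock(&w->mutex);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, worker_main, &p);

    /* dispatch one item, as compress_page_with_multi_thread() now does */
    pthread_mutex_lock(&p.mutex);
    p.req = "page-0";
    pthread_cond_signal(&p.cond);
    pthread_mutex_unlock(&p.mutex);

    /* wait for completion under the shared done lock, as flush_compressed_data() now does */
    pthread_mutex_lock(&done_lock);
    while (!p.done) {
        pthread_cond_wait(&done_cond, &done_lock);
    }
    pthread_mutex_unlock(&done_lock);

    /* ask the worker to exit */
    pthread_mutex_lock(&p.mutex);
    p.quit = true;
    pthread_cond_signal(&p.cond);
    pthread_mutex_unlock(&p.mutex);
    pthread_join(tid, NULL);
    return 0;
}

Because the worker rechecks the req/quit predicates under its mutex every time it wakes, a signal sent before the worker reaches pthread_cond_wait() is never lost — which is exactly why the patch can delete the old comment about re-checking quit after acquiring the mutex.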
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c | 84
1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index a5ed21b..59473d9 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -253,7 +253,6 @@ static struct BitmapRcu {
} *migration_bitmap_rcu;
struct CompressParam {
- bool start;
bool done;
bool quit;
QEMUFile *file;
@@ -293,34 +292,36 @@ static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
-static int do_compress_ram_page(CompressParam *param);
+static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
+ ram_addr_t offset);
static void *do_data_compress(void *opaque)
{
CompressParam *param = opaque;
+ RAMBlock *block;
+ ram_addr_t offset;
+ qemu_mutex_lock(&param->mutex);
while (!param->quit) {
- qemu_mutex_lock(&param->mutex);
- /* Re-check the quit flag in case of
- * terminate_compression_threads is called just before
- * qemu_mutex_lock(&param->mutex) and after
- * while(!param->quit), re-check it here can make
- * sure the compression thread terminate as expected.
- */
- while (!param->start && !param->quit) {
+ if (param->block) {
+ block = param->block;
+ offset = param->offset;
+ param->block = NULL;
+ qemu_mutex_unlock(&param->mutex);
+
+ do_compress_ram_page(param->file, block, offset);
+
+ qemu_mutex_lock(comp_done_lock);
+ param->done = true;
+ qemu_cond_signal(comp_done_cond);
+ qemu_mutex_unlock(comp_done_lock);
+
+ qemu_mutex_lock(&param->mutex);
+ } else {
qemu_cond_wait(&param->cond, &param->mutex);
}
- if (!param->quit) {
- do_compress_ram_page(param);
- }
- param->start = false;
- qemu_mutex_unlock(&param->mutex);
-
- qemu_mutex_lock(comp_done_lock);
- param->done = true;
- qemu_cond_signal(comp_done_cond);
- qemu_mutex_unlock(comp_done_lock);
}
+ qemu_mutex_unlock(&param->mutex);
return NULL;
}
@@ -808,18 +809,15 @@ static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
return pages;
}
-static int do_compress_ram_page(CompressParam *param)
+static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
+ ram_addr_t offset)
{
int bytes_sent, blen;
- uint8_t *p;
- RAMBlock *block = param->block;
- ram_addr_t offset = param->offset;
+ uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
- p = block->host + (offset & TARGET_PAGE_MASK);
-
- bytes_sent = save_page_header(param->file, block, offset |
+ bytes_sent = save_page_header(f, block, offset |
RAM_SAVE_FLAG_COMPRESS_PAGE);
- blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
+ blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
migrate_compress_level());
if (blen < 0) {
bytes_sent = 0;
@@ -832,15 +830,6 @@ static int do_compress_ram_page(CompressParam *param)
return bytes_sent;
}
-static inline void start_compression(CompressParam *param)
-{
- param->done = false;
- qemu_mutex_lock(&param->mutex);
- param->start = true;
- qemu_cond_signal(&param->cond);
- qemu_mutex_unlock(&param->mutex);
-}
-
static inline void start_decompression(DecompressParam *param)
{
param->done = false;
@@ -860,18 +849,22 @@ static void flush_compressed_data(QEMUFile *f)
return;
}
thread_count = migrate_compress_threads();
+
+ qemu_mutex_lock(comp_done_lock);
for (idx = 0; idx < thread_count; idx++) {
- if (!comp_param[idx].done) {
- qemu_mutex_lock(comp_done_lock);
- while (!comp_param[idx].done && !comp_param[idx].quit) {
- qemu_cond_wait(comp_done_cond, comp_done_lock);
- }
- qemu_mutex_unlock(comp_done_lock);
+ while (!comp_param[idx].done) {
+ qemu_cond_wait(comp_done_cond, comp_done_lock);
}
+ }
+ qemu_mutex_unlock(comp_done_lock);
+
+ for (idx = 0; idx < thread_count; idx++) {
+ qemu_mutex_lock(&comp_param[idx].mutex);
if (!comp_param[idx].quit) {
len = qemu_put_qemu_file(f, comp_param[idx].file);
bytes_transferred += len;
}
+ qemu_mutex_unlock(&comp_param[idx].mutex);
}
}
@@ -893,9 +886,12 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
while (true) {
for (idx = 0; idx < thread_count; idx++) {
if (comp_param[idx].done) {
+ comp_param[idx].done = false;
bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
+ qemu_mutex_lock(&comp_param[idx].mutex);
set_compress_params(&comp_param[idx], block, offset);
- start_compression(&comp_param[idx]);
+ qemu_cond_signal(&comp_param[idx].cond);
+ qemu_mutex_unlock(&comp_param[idx].mutex);
pages = 1;
acct_info.norm_pages++;
*bytes_transferred += bytes_xmit;