author     Keqian Zhu <zhukeqian1@huawei.com>             2020-04-13 18:15:08 +0800
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>   2020-05-07 17:40:24 +0100
commit     cbbf818224faf5ede75c876e4900c9f8e6b6c0db
tree       784aaa0ad9cf5964fb0c5219d629afea051423bb  /migration/ram.c
parent     58602676dfd00f519248d4936b0711b7967cbf62
migration/throttle: Add cpu-throttle-tailslow migration parameter
At the tail stage of throttling, the guest is very sensitive to
changes in CPU percentage, yet the fixed @cpu-throttle-increment
is usually excessive at that stage.
If this parameter is true, we compute the ideal CPU percentage
for the guest, i.e. the percentage that would make the dirty rate
exactly match the dirty rate threshold. We then apply the smaller
of two increments: the one specified by @cpu-throttle-increment
and the one derived from the ideal CPU percentage.
This stays compatible with traditional throttling while keeping
the increment from being excessive at the tail stage. It may
lengthen migration time, so it is disabled by default.
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Message-Id: <20200413101508.54793-1-zhukeqian1@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
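
[Editor's note: to make the increment selection described above concrete,
here is a minimal standalone sketch of the tailslow arithmetic. It is not
QEMU code: the helper name tailslow_increment and the example numbers are
hypothetical, chosen only to mirror the computation done in
mig_throttle_guest_down() in the diff below.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical standalone version of the tailslow increment selection,
 * mirroring the arithmetic added to mig_throttle_guest_down(). */
static uint64_t tailslow_increment(uint64_t throttle_now,
                                   uint64_t bytes_dirty_period,
                                   uint64_t bytes_dirty_threshold,
                                   uint64_t pct_increment)
{
    /* CPU percentage the guest currently gets */
    uint64_t cpu_now = 100 - throttle_now;
    /* Ideal guest CPU percentage: shrink the current share by the
     * ratio of the dirty threshold to the observed dirty rate. */
    uint64_t cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                    bytes_dirty_period);
    /* Never step harder than the configured increment. */
    return MIN(cpu_now - cpu_ideal, pct_increment);
}

int main(void)
{
    /* Hypothetical tail-stage numbers: 60% throttle (guest keeps 40%),
     * dirty rate 110 MB/s against a 100 MB/s threshold, increment 10.
     * cpu_ideal = 40 * 100 / 110 = 36 (truncated), so the tailslow
     * step is MIN(40 - 36, 10) = 4 rather than the full 10. */
    printf("increment = %" PRIu64 "\n",
           tailslow_increment(60, 110, 100, 10));
    return 0;
}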
Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c | 25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 53166fc..52fc032 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -616,20 +616,34 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
  * able to complete migration. Some workloads dirty memory way too
  * fast and will not effectively converge, even with auto-converge.
  */
-static void mig_throttle_guest_down(void)
+static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
+                                    uint64_t bytes_dirty_threshold)
 {
     MigrationState *s = migrate_get_current();
     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
-    uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
+    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
+    bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
     int pct_max = s->parameters.max_cpu_throttle;
+    uint64_t throttle_now = cpu_throttle_get_percentage();
+    uint64_t cpu_now, cpu_ideal, throttle_inc;
+
     /* We have not started throttling yet. Let's start it. */
     if (!cpu_throttle_active()) {
         cpu_throttle_set(pct_initial);
     } else {
         /* Throttling already on, just increase the rate */
-        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
-                         pct_max));
+        if (!pct_tailslow) {
+            throttle_inc = pct_increment;
+        } else {
+            /* Compute the ideal CPU percentage used by Guest, which may
+             * make the dirty rate match the dirty rate threshold. */
+            cpu_now = 100 - throttle_now;
+            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
+                        bytes_dirty_period);
+            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
+        }
+        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
     }
 }

@@ -919,7 +933,8 @@ static void migration_trigger_throttle(RAMState *rs)
             (++rs->dirty_rate_high_cnt >= 2)) {
             trace_migration_throttle();
             rs->dirty_rate_high_cnt = 0;
-            mig_throttle_guest_down();
+            mig_throttle_guest_down(bytes_dirty_period,
+                                    bytes_dirty_threshold);
         }
     }
 }
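
[Editor's note: for completeness, the new parameter would be toggled like
any other migration parameter. The commands below are shown for
illustration only and assume the standard HMP and QMP parameter
interfaces:

    (qemu) migrate_set_parameter cpu-throttle-tailslow on

    { "execute": "migrate-set-parameters",
      "arguments": { "cpu-throttle-tailslow": true } }
]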