about summary refs log tree commit diff
path: root/migration/ram.c
diff options
context:
space:
mode:
Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 0ef6879..02cfd76 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -896,11 +896,38 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
}
}
+static void migration_trigger_throttle(RAMState *rs) /* decide once per bitmap-sync period whether to start/raise guest throttling */
+{
+ MigrationState *s = migrate_get_current();
+ uint64_t threshold = s->parameters.throttle_trigger_threshold; /* a percentage (divided by 100 below) */
+
+ uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev; /* bytes sent since the last sync period */
+ uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE; /* bytes dirtied since the last sync period */
+ uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100; /* dirtying above this fraction of transfer triggers throttling */
+
+ /* During block migration the auto-converge logic incorrectly detects
+ * that ram migration makes no progress. Avoid this by disabling the
+ * throttling logic during the bulk phase of block migration. */
+ if (migrate_auto_converge() && !blk_mig_bulk_active()) {
+ /* The following detection logic can be refined later. For now:
+ Check to see if the ratio between dirtied bytes and the approx.
+ amount of bytes that just got transferred since the last time
+ we were in this routine reaches the threshold. If that happens
+ twice, start or increase throttling. */
+
+ if ((bytes_dirty_period > bytes_dirty_threshold) &&
+ (++rs->dirty_rate_high_cnt >= 2)) { /* require two consecutive high periods before acting */
+ trace_migration_throttle();
+ rs->dirty_rate_high_cnt = 0;
+ mig_throttle_guest_down(); /* start or increase throttling, per the detection comment above */
+ }
+ }
+}
+
static void migration_bitmap_sync(RAMState *rs)
{
RAMBlock *block;
int64_t end_time;
- uint64_t bytes_xfer_now;
ram_counters.dirty_sync_count++;
@@ -927,26 +954,7 @@ static void migration_bitmap_sync(RAMState *rs)
/* more than 1 second = 1000 millisecons */
if (end_time > rs->time_last_bitmap_sync + 1000) {
- bytes_xfer_now = ram_counters.transferred;
-
- /* During block migration the auto-converge logic incorrectly detects
- * that ram migration makes no progress. Avoid this by disabling the
- * throttling logic during the bulk phase of block migration. */
- if (migrate_auto_converge() && !blk_mig_bulk_active()) {
- /* The following detection logic can be refined later. For now:
- Check to see if the dirtied bytes is 50% more than the approx.
- amount of bytes that just got transferred since the last time we
- were in this routine. If that happens twice, start or increase
- throttling */
-
- if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
- (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
- (++rs->dirty_rate_high_cnt >= 2)) {
- trace_migration_throttle();
- rs->dirty_rate_high_cnt = 0;
- mig_throttle_guest_down();
- }
- }
+ migration_trigger_throttle(rs);
migration_update_rates(rs, end_time);
@@ -955,7 +963,7 @@ static void migration_bitmap_sync(RAMState *rs)
/* reset period counters */
rs->time_last_bitmap_sync = end_time;
rs->num_dirty_pages_period = 0;
- rs->bytes_xfer_prev = bytes_xfer_now;
+ rs->bytes_xfer_prev = ram_counters.transferred;
}
if (migrate_use_events()) {
qapi_event_send_migration_pass(ram_counters.dirty_sync_count);