/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qmp/json-writer.h"
#include "qemu/thread.h"
#include "qemu/coroutine_int.h"
#include "io/channel.h"
#include "io/channel-buffer.h"
#include "net/announce.h"
#include "qom/object.h"
#include "postcopy-ram.h"
#include "sysemu/runstate.h"

struct PostcopyBlocktimeContext;

#define  MIGRATION_RESUME_ACK_VALUE  (1)

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K.  This has the
 * benefit that all chunks are aligned to 64 pages, so the bitmaps are
 * always aligned to the size of a host long.
 */
#define CLEAR_BITMAP_SHIFT_MIN             6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K.  This is the
 * default value used when none is specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT        18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K.  This should be
 * big enough to make sure we won't overflow easily.
 */
#define CLEAR_BITMAP_SHIFT_MAX            31
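
/*
 * Illustrative sketch (not QEMU code; guarded out): how a clear-bitmap
 * shift value maps to a guest memory chunk size, assuming a 4K page size.
 * The helper name example_chunk_size() is hypothetical.
 */
#if 0
static uint64_t example_chunk_size(uint64_t page_size, uint8_t shift)
{
    /* A chunk covers (1 << shift) pages, i.e. page_size << shift bytes */
    return page_size << shift;
}

static void example_chunk_sizes(void)
{
    const uint64_t page_4k = 4096;

    assert(example_chunk_size(page_4k, CLEAR_BITMAP_SHIFT_MIN) == 256 * 1024);
    assert(example_chunk_size(page_4k, CLEAR_BITMAP_SHIFT_DEFAULT) == 1ULL << 30);
    assert(example_chunk_size(page_4k, CLEAR_BITMAP_SHIFT_MAX) == 1ULL << 43);
}
#endif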

/* This is an abstraction of a "temp huge page" for postcopy's purpose */
typedef struct {
    /*
     * This points to a temporary huge page used as a buffer for UFFDIO_COPY.
     * It's mmap()ed and needs to be unmapped during cleanup.
     */
    void *tmp_huge_page;
    /*
     * This points to the host page we're going to install for this temp page.
     * Once we've received the whole page, it tells us where to put it.
     */
    void *host_addr;
    /* Number of small pages copied (each of size TARGET_PAGE_SIZE) */
    unsigned int target_pages;
    /* Whether this page contains all zeros */
    bool all_zero;
} PostcopyTmpPage;
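
/*
 * Illustrative sketch (not QEMU code; guarded out): how a PostcopyTmpPage
 * could accumulate TARGET_PAGE_SIZE-sized pieces until a whole host page
 * has been received and can be installed in a single UFFDIO_COPY.  The
 * helper name and parameters are hypothetical; buffer_is_zero() is from
 * qemu/cutils.h.
 */
#if 0
static void example_recv_piece(PostcopyTmpPage *tmp, const void *piece,
                               size_t target_page_size,
                               size_t host_page_size, void *host_addr)
{
    size_t offset = (size_t)tmp->target_pages * target_page_size;

    /* Remember where the fully assembled page must finally be installed */
    tmp->host_addr = host_addr;

    memcpy((char *)tmp->tmp_huge_page + offset, piece, target_page_size);
    tmp->target_pages++;

    if (!buffer_is_zero(piece, target_page_size)) {
        tmp->all_zero = false;
    }

    if (tmp->target_pages == host_page_size / target_page_size) {
        /*
         * Whole host page received: install it with UFFDIO_COPY (or
         * UFFDIO_ZEROPAGE when all_zero), then reset the temp page.
         */
    }
}
#endif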

typedef enum {
    PREEMPT_THREAD_NONE = 0,
    PREEMPT_THREAD_CREATED,
    PREEMPT_THREAD_QUIT,
} PreemptThreadStatus;

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;
    /* Previously received RAM's RAMBlock pointer */
    RAMBlock *last_recv_block[RAM_CHANNEL_MAX];
    /* A hook to allow cleanup at the end of incoming migration */
    void *transport_data;
    void (*transport_cleanup)(void *data);
    /*
     * Used to sync thread creation.  Note that because of this sem, we
     * can't create threads in parallel.
     */
    QemuSemaphore  thread_sync_sem;
    /*
     * Cleared at the start of the main state load; set when the main
     * thread finishes loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer  announce_timer;

    size_t         largest_page_size;
    bool           have_fault_thread;
    QemuThread     fault_thread;
    /* Set this when we want the fault thread to quit */
    bool           fault_thread_quit;

    bool           have_listen_thread;
    QemuThread     listen_thread;

    /* For the kernel to send us notifications */
    int       userfault_fd;
    /* To notify the fault_thread to wake, e.g., when it needs to quit */
    int       userfault_event_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    /*
     * Number of postcopy channels, including the default precopy channel;
     * vanilla postcopy will therefore only have one channel, which contains
     * both the precopy and postcopy streams.
     *
     * This is calculated when the src requests to enable postcopy but before
     * it starts.  Its value can depend on e.g. whether postcopy preemption is
     * enabled.
     */
    unsigned int postcopy_channels;
    /* QEMUFile for postcopy only; it'll be handled by a separate thread */
    QEMUFile *postcopy_qemufile_dst;
    /*
     * Posted when postcopy_qemufile_dst is properly set up.  One can wait
     * on this semaphore until the preempt channel is ready.
     */
    QemuSemaphore postcopy_qemufile_dst_done;
    /* The postcopy priority thread is used to receive postcopy-requested pages */
    QemuThread postcopy_prio_thread;
    /*
     * Always set by the main vm load thread only, but can be read by the
     * postcopy preempt thread.  "volatile" makes sure all reads will be
     * up-to-date across cores.
     */
    volatile PreemptThreadStatus preempt_thread_status;
    /*
     * Used to sync between the ram load main thread and the fast ram load
     * thread.  It protects postcopy_qemufile_dst, which is the postcopy
     * fast channel.
     *
     * The ram fast load thread will hold it for most of its lifecycle,
     * because it needs to continuously read data from the channel, and
     * it'll only release this mutex if postcopy is interrupted, so that
     * the ram load main thread will take this mutex over and properly
     * release the broken channel.
     */
    QemuMutex postcopy_prio_thread_mutex;
    /*
     * An array of temp host huge pages to be used, one for each postcopy
     * channel.
     */
    PostcopyTmpPage *postcopy_tmp_pages;
    /* This is shared for all postcopy channels */
    void     *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray   *postcopy_remote_fds;

    int state;

    /*
     * The incoming migration coroutine, non-NULL during qemu_loadvm_state().
     * Used to wake the incoming migration coroutine from RDMA code; how
     * safe that is remains an open question.
     */
    Coroutine *loadvm_co;

    /* The coroutine we should enter (back) after failover */
    Coroutine *colo_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext keeps information for postcopy live
     * migration, used to calculate vCPU block time.
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* notify PAUSED postcopy incoming migrations to try to continue */
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;
    /*
     * This semaphore is used to allow the ram fast load thread (only when
     * postcopy preempt is enabled) to fall asleep when a network
     * interruption is detected.  When the recovery is done, the main load
     * thread will kick the fast ram load thread using this semaphore.
     */
    QemuSemaphore postcopy_pause_sem_fast_load;

    /* List of listening socket addresses  */
    SocketAddressList *socket_address_list;

    /* A tree of pages that we requested from the source VM */
    GTree *page_requested;
    /*
     * For postcopy only, count the number of requested page faults that
     * still haven't been resolved.
     */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
     * source, IOW, to guarantee coherence between the page_requested tree
     * and the per-ramblock receivedmap.  Note!  This does not guarantee
     * consistency of the real page copy procedures (using
     * UFFDIO_[ZERO]COPY).  E.g., even if one bit in receivedmap is
     * cleared, UFFDIO_COPY could have happened for that page already.
     * This is intended so that the mutex won't serialize, and be blocked
     * by, slow operations like UFFDIO_* ioctls.  However, this should be
     * enough to make sure the page_requested tree always contains valid
     * information.  (See the illustrative sketch after this struct.)
     */
    QemuMutex page_request_mutex;
    /*
     * If postcopy preempt is enabled, there is a chance that the main
     * thread finishes loading its data before the preempt channel has
     * finished loading the urgent pages.  If that happens, the two threads
     * will use this condvar to synchronize, so the main thread will always
     * wait until all pages are received.
     */
    QemuCond page_request_cond;

    /*
     * Number of devices that have yet to approve switchover. When this reaches
     * zero an ACK that it's OK to do switchover is sent to the source. No lock
     * is needed as this field is updated serially.
     */
    unsigned int switchover_ack_pending_num;
};
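
/*
 * Illustrative sketch (not QEMU code; guarded out): the locking pattern
 * intended for page_requested/page_requested_count, as described for
 * page_request_mutex above.  The helper name is hypothetical.
 */
#if 0
static void example_mark_page_requested(MigrationIncomingState *mis,
                                        void *host_addr)
{
    qemu_mutex_lock(&mis->page_request_mutex);
    if (!g_tree_lookup(mis->page_requested, host_addr)) {
        /* Track the still-unresolved request under the mutex */
        g_tree_insert(mis->page_requested, host_addr, (gpointer)1);
        mis->page_requested_count++;
    }
    qemu_mutex_unlock(&mis->page_request_mutex);
}
#endif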

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
void migration_incoming_transport_cleanup(MigrationIncomingState *mis);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

typedef struct MigrationClass MigrationClass;
DECLARE_OBJ_CHECKERS(MigrationState, MigrationClass,
                     MIGRATION_OBJ, TYPE_MIGRATION)

struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
};

struct MigrationState {
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    QemuThread thread;
    /* Protected by qemu_file_lock */
    QEMUFile *to_dst_file;
    /* Postcopy specific transfer channel */
    QEMUFile *postcopy_qemufile_src;
    /*
     * It is posted when the preempt channel is established.  Note: this is
     * used for both the start and the recovery of a postcopy migration.
     * We'll post to this sem every time a new preempt channel is created
     * in the main thread, and we keep post() and wait() in pairs.
     */
    QemuSemaphore postcopy_qemufile_src_sem;
    QIOChannelBuffer *bioc;
    /*
     * Protects to_dst_file/from_dst_file pointers.  We need to make sure we
     * won't yield or hang during the critical section, since this lock will be
     * used in the OOB command handler.
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already sent at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already sent at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth, or avail-switchover-bandwidth if specified.
     */
    uint64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        /* Protected by qemu_file_lock */
        QEMUFile     *from_dst_file;
        QemuThread    rp_thread;
        /*
         * We could also check for a non-zero rp_thread, but there's no
         * "official" way to do that, so this bool makes it slightly more
         * elegant.  Checking from_dst_file for this is racy because
         * from_dst_file will be cleared by the rp_thread!
         */
        bool          rp_thread_created;
        /*
         * Used to synchronize between migration main thread and return
         * path thread.  The migration thread can wait() on this sem, while
         * other threads (e.g., return path thread) can kick it using a
         * post().
         */
        QemuSemaphore rp_sem;
        /*
         * We post to this when we get a PONG from dest.  So far it's an
         * easy way to know the main channel has been successfully established
         * on dest QEMU.
         */
        QemuSemaphore rp_pong_acks;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp (ms) when the VM is stopped to migrate the last remaining state */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;

    /*
     * State before stopping the vm by vm_stop_force_state().
     * If migration is interrupted for any reason, we need to continue
     * running the guest on the source if it was running, or restore its
     * stopped state.
     */
    RunState vm_old_state;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for guest to unplug device */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify COLO thread to do checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /* The first error that has occurred.
       We use the mutex to be able to return the 1st error message.
       (See the illustrative sketch after this struct.) */
    Error *error;
    /* mutex to protect error */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination. It is left at false for qemu
     * older than 3.0, since only newer qemu sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;
    /*
     * This variable only affects behavior when postcopy preempt mode is
     * enabled.
     *
     * When set:
     *
     * - postcopy preempt src QEMU instance will generate an EOS message at
     *   the end of migration to shut the preempt channel on dest side.
     *
     * - postcopy preempt channel will be created at the setup phase on src
     *   QEMU.
     *
     * When clear:
     *
     * - postcopy preempt src QEMU instance will _not_ generate an EOS
     *   message at the end of migration; the dest qemu will shut down the
     *   channel itself.
     *
     * - postcopy preempt channel will be created at the switching phase
     *   from precopy -> postcopy (to avoid race condition of misordered
     *   creation of channels).
     *
     * NOTE: See message-id <ZBoShWArKDPpX/D7@work-vm> on qemu-devel
     * mailing list for more information on the possible race.  Everyone
     * should probably just keep this value untouched after set by the
     * machine type (or the default).
     */
    bool preempt_pre_7_2;

    /*
     * Flush every channel after each section is sent.
     *
     * This ensures that we can't mix pages from one iteration through
     * the ram pages with pages for the following iteration.  We really
     * only need to do this flush after we have gone through all the
     * dirty pages.  For historical reasons, we do it after each
     * section instead.  This is suboptimal (we flush too many times).
     * Default value is false. (since 8.1)
     */
    bool multifd_flush_after_each_section;
    /*
     * This decides the size of the guest memory chunk that will be used
     * to track dirty bitmap clearing.  The size of a memory chunk will
     * be GUEST_PAGE_SIZE << N.  Say, N=0 means we clear the dirty
     * bitmap for each page as it is sent (1<<0=1); N=10 means we clear
     * the dirty bitmap only once per 1<<10=1K contiguous guest pages
     * (a 4M chunk when page size is 4K).
     */
    uint8_t clear_bitmap_shift;

    /*
     * This saves the hostname when outgoing migration starts
     */
    char *hostname;

    /* QEMU_VM_VMDESCRIPTION content filled for all non-iterable devices. */
    JSONWriter *vmdesc;

    /*
     * Indicates whether an ACK from the destination that it's OK to do
     * switchover has been received.
     */
    bool switchover_acked;
    /* Is this an RDMA migration */
    bool rdma_migration;
};
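
/*
 * Illustrative sketch (not QEMU code; guarded out): the "first error wins"
 * behavior that error/error_mutex above are meant to provide.  This is a
 * behavioral sketch, not necessarily the actual implementation.
 */
#if 0
static void example_set_error(MigrationState *s, const Error *new_error)
{
    qemu_mutex_lock(&s->error_mutex);
    if (!s->error) {
        /* Only the first error is recorded; later ones are ignored */
        s->error = error_copy(new_error);
    }
    qemu_mutex_unlock(&s->error_mutex);
}
#endif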

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool  migration_has_all_channels(void);

void migrate_set_error(MigrationState *s, const Error *error);
bool migrate_has_error(MigrationState *s);

void migrate_fd_connect(MigrationState *s, Error *error_in);

bool migration_is_setup_or_active(int state);
bool migration_is_running(int state);

int migrate_init(MigrationState *s, Error **errp);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
bool migration_postcopy_is_alive(int state);
MigrationState *migrate_get_current(void);

uint64_t ram_get_total_transferred_pages(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start, uint64_t haddr);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
int migrate_send_rp_switchover_ack(MigrationIncomingState *mis);
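
/*
 * Illustrative sketch (not QEMU code; guarded out): how the destination's
 * fault handling might use the return-path helpers above to request a page
 * that faulted at host address 'haddr' within RAMBlock 'rb'.  The helper
 * name and the rb_host_base parameter are hypothetical.
 */
#if 0
static int example_on_fault(MigrationIncomingState *mis, RAMBlock *rb,
                            uint64_t haddr, uint64_t rb_host_base)
{
    /* Offset of the faulting page within the RAMBlock */
    ram_addr_t start = haddr - rb_host_base;

    /* Ask the source to send this page urgently over the return path */
    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}
#endif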

void dirty_bitmap_mig_before_vm_start(void);
void dirty_bitmap_mig_cancel_outgoing(void);
void dirty_bitmap_mig_cancel_incoming(void);
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp);

void migrate_add_address(SocketAddress *address);
bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
                       Error **errp);
int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"

void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);
void migration_cancel(const Error *error);
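
/*
 * Illustrative sketch (not QEMU code; guarded out): a plausible pairing of
 * the urgent-request helpers above with migration_rate_limit(), simplified
 * from the rate_limit_sem comment in MigrationState.
 */
#if 0
/* e.g. return path thread: a page request must bypass rate limiting */
static void example_urgent_producer(void)
{
    migration_make_urgent_request();
}

/* Migration thread: honour the rate limit unless an urgent request came */
static void example_migration_thread_step(void)
{
    if (migration_rate_limit()) {
        /* Woken early by an urgent request: balance the post()/wait() */
        migration_consume_urgent_request();
        /* ... service the urgently requested pages first ... */
    }
}
#endif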

void migration_populate_vfio_info(MigrationInfo *info);
void migration_reset_vfio_bytes_transferred(void);
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page);

/*
 * Migration thread waiting for return path thread.  Return non-zero if an
 * error is detected.
 */
int migration_rp_wait(MigrationState *s);
/*
 * Kick the migration thread waiting for return path messages.  NOTE: the
 * name can be slightly confusing (it reads as "kick the rp thread"); just
 * remember the target is always the migration thread.
 */
void migration_rp_kick(MigrationState *s);
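
/*
 * Illustrative sketch (not QEMU code; guarded out): the wait()/kick()
 * pairing between the migration thread and the return path thread that
 * the two helpers above provide.
 */
#if 0
/* Migration thread side: block until the return path makes progress */
static int example_wait_for_return_path(MigrationState *s)
{
    return migration_rp_wait(s);   /* non-zero if an error was detected */
}

/* Return path thread side: wake up the waiting migration thread */
static void example_return_path_progress(MigrationState *s)
{
    migration_rp_kick(s);
}
#endif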

int migration_stop_vm(RunState state);

#endif