/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qmp/json-writer.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "io/channel.h"
#include "io/channel-buffer.h"
#include "net/announce.h"
#include "qom/object.h"
#include "postcopy-ram.h"
#include "sysemu/runstate.h"
#include "migration/misc.h"

struct PostcopyBlocktimeContext;

#define MIGRATION_RESUME_ACK_VALUE (1)

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us the
 * benefit that all the chunks are aligned to 64 pages, so the bitmaps
 * are always aligned to LONG.
 */
#define CLEAR_BITMAP_SHIFT_MIN 6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
 * default value to use if none is specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT 18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
 * big enough while making sure we won't overflow easily.
 */
#define CLEAR_BITMAP_SHIFT_MAX 31
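
/*
 * To make the shift values above concrete (a worked example, assuming a
 * 4K page size): the chunk covered by one clear operation is
 * page_size << shift bytes, so
 *
 *   4K << 6  = 256K  (CLEAR_BITMAP_SHIFT_MIN)
 *   4K << 18 = 1G    (CLEAR_BITMAP_SHIFT_DEFAULT)
 *   4K << 31 = 8T    (CLEAR_BITMAP_SHIFT_MAX)
 */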

/* This is an abstraction of a "temp huge page" for postcopy's purpose */
typedef struct {
    /*
     * This points to a temporary huge page as a buffer for UFFDIO_COPY. It's
     * mmap()ed and needs to be freed on cleanup.
     */
    void *tmp_huge_page;
    /*
     * This points to the host page we're going to install for this temp page.
     * After we've received the whole page, it tells us where to put it.
     */
    void *host_addr;
    /* Number of small pages copied (in size of TARGET_PAGE_SIZE) */
    unsigned int target_pages;
    /* Whether this page contains all zeros */
    bool all_zero;
} PostcopyTmpPage;
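
/*
 * Sketch of the intended flow, as inferred from the field comments above
 * (not a normative description): small pages received from the wire are
 * accumulated in tmp_huge_page; once target_pages covers the whole huge
 * page, the buffer is installed at host_addr via UFFDIO_COPY (or the
 * zero-page path when all_zero is set).
 */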

typedef enum {
    PREEMPT_THREAD_NONE = 0,
    PREEMPT_THREAD_CREATED,
    PREEMPT_THREAD_QUIT,
} PreemptThreadStatus;

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;
    /* Previously received RAM's RAMBlock pointer */
    RAMBlock *last_recv_block[RAM_CHANNEL_MAX];
    /* A hook to allow cleanup at the end of incoming migration */
    void *transport_data;
    void (*transport_cleanup)(void *data);
    /*
     * Used to sync thread creations. Note that we can't create threads in
     * parallel with this sem.
     */
    QemuSemaphore thread_sync_sem;
    /*
     * Free at the start of the main state load, set as the main thread
     * finishes loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer announce_timer;

    size_t largest_page_size;
    bool have_fault_thread;
    QemuThread fault_thread;
    /* Set this when we want the fault thread to quit */
    bool fault_thread_quit;

    bool have_listen_thread;
    QemuThread listen_thread;

    /* For the kernel to send us notifications */
    int userfault_fd;
    /* To notify the fault_thread to wake, e.g., when we need it to quit */
    int userfault_event_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex; /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    /*
     * Number of postcopy channels including the default precopy channel, so
     * vanilla postcopy will only contain one channel which contains both
     * precopy and postcopy streams.
     *
     * This is calculated when the src requests to enable postcopy but before
     * it starts. Its value can depend on e.g. whether postcopy preemption is
     * enabled.
     */
    unsigned int postcopy_channels;
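    /*
     * E.g. (an illustration based on the comment above, not normative):
     * vanilla postcopy gives postcopy_channels == 1, while postcopy with
     * preemption enabled gives 2 (the precopy channel plus the preempt
     * channel below).
     */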
    /* QEMUFile for postcopy only; it'll be handled by a separate thread */
    QEMUFile *postcopy_qemufile_dst;
    /*
     * When postcopy_qemufile_dst is properly set up, this sem is posted.
     * One can wait on this semaphore to wait until the preempt channel is
     * properly set up.
     */
    QemuSemaphore postcopy_qemufile_dst_done;
    /* Postcopy priority thread is used to receive postcopy requested pages */
    QemuThread postcopy_prio_thread;
    /*
     * Always set by the main vm load thread only, but can be read by the
     * postcopy preempt thread. "volatile" makes sure all reads will be
     * up-to-date across cores.
     */
    volatile PreemptThreadStatus preempt_thread_status;
    /*
     * Used to sync between the ram load main thread and the fast ram load
     * thread. It protects postcopy_qemufile_dst, which is the postcopy
     * fast channel.
     *
     * The ram fast load thread will take it mostly for the whole lifecycle
     * because it needs to continuously read data from the channel, and
     * it'll only release this mutex if postcopy is interrupted, so that
     * the ram load main thread will take this mutex over and properly
     * release the broken channel.
     */
    QemuMutex postcopy_prio_thread_mutex;
    /*
     * An array of temp host huge pages to be used, one for each postcopy
     * channel.
     */
    PostcopyTmpPage *postcopy_tmp_pages;
    /* This is shared for all postcopy channels */
    void *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray *postcopy_remote_fds;

    int state;

    /*
     * The incoming migration coroutine, non-NULL during qemu_loadvm_state().
     * Used to wake the migration incoming coroutine from rdma code. How
     * safe this is remains an open question.
     */
    Coroutine *loadvm_co;

    /* The coroutine we should enter (back) after failover */
    Coroutine *colo_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext to keep information for postcopy
     * live migration, to calculate vCPU block time
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* notify PAUSED postcopy incoming migrations to try to continue */
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;
    /*
     * This semaphore is used to let the ram fast load thread (only when
     * postcopy preempt is enabled) fall asleep when a network interruption
     * is detected. When the recovery is done, the main load thread will
     * kick the fast ram load thread using this semaphore.
     */
    QemuSemaphore postcopy_pause_sem_fast_load;

    /* List of listening socket addresses */
    SocketAddressList *socket_address_list;

    /* A tree of pages that we requested to the source VM */
    GTree *page_requested;
    /*
     * For postcopy only, count the number of requested page faults that
     * still haven't been resolved.
     */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
     * source, IOW, to guarantee coherence between the page_requested tree
     * and the per-ramblock receivedmap. Note! This does not guarantee
     * consistency of the real page copy procedures (using UFFDIO_[ZERO]COPY).
     * E.g., even if one bit in receivedmap is cleared, UFFDIO_COPY could have
     * happened for that page already. This is intended so that the mutex
     * won't be serialized and blocked by slow operations like the UFFDIO_*
     * ioctls. However this should be enough to make sure the page_requested
     * tree always contains valid information.
     */
    QemuMutex page_request_mutex;
    /*
     * If postcopy preempt is enabled, there is a chance that the main
     * thread finished loading its data before the preempt channel has
     * finished loading the urgent pages. If that happens, the two threads
     * will use this condvar to synchronize, so the main thread will always
     * wait until all pages are received.
     */
    QemuCond page_request_cond;

    /*
     * Number of devices that have yet to approve switchover. When this
     * reaches zero an ACK that it's OK to do switchover is sent to the
     * source. No lock is needed as this field is updated serially.
     */
    unsigned int switchover_ack_pending_num;
};

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
void migration_incoming_transport_cleanup(MigrationIncomingState *mis);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

typedef struct MigrationClass MigrationClass;
DECLARE_OBJ_CHECKERS(MigrationState, MigrationClass,
                     MIGRATION_OBJ, TYPE_MIGRATION)
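
/*
 * Note (an assumption based on the QOM conventions in qom/object.h):
 * DECLARE_OBJ_CHECKERS() above generates the usual cast helpers for this
 * type, e.g. MIGRATION_OBJ(obj), MIGRATION_OBJ_CLASS(klass) and
 * MIGRATION_OBJ_GET_CLASS(obj).
 */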

struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
};

struct MigrationState {
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    QemuThread thread;
    /* Protected by qemu_file_lock */
    QEMUFile *to_dst_file;
    /* Postcopy specific transfer channel */
    QEMUFile *postcopy_qemufile_src;
    /*
     * It is posted when the preempt channel is established. Note: this is
     * used for both the start and the recovery of a postcopy migration.
     * We'll post to this sem every time a new preempt channel is created in
     * the main thread, and we keep post() and wait() in pair.
     */
    QemuSemaphore postcopy_qemufile_src_sem;
    QIOChannelBuffer *bioc;
    /*
     * Protects to_dst_file/from_dst_file pointers. We need to make sure we
     * won't yield or hang during the critical section, since this lock will
     * be used in the OOB command handler.
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already sent at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already sent at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth, or avail-switchover-bandwidth if specified.
     */
    uint64_t threshold_size;
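    /*
     * A worked approximation (the authoritative calculation lives in
     * migration.c): threshold_size ~= measured_bandwidth * downtime_limit,
     * i.e. the amount of data that could still be transferred within the
     * allowed downtime at the current rate.
     */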

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        /* Protected by qemu_file_lock */
        QEMUFile *from_dst_file;
        QemuThread rp_thread;
        /*
         * We could also check rp_thread for non-zero, but there's no
         * "official" way to do this, so this bool makes it slightly more
         * elegant. Checking from_dst_file for this is racy because
         * from_dst_file will be cleared in the rp_thread!
         */
        bool rp_thread_created;
        /*
         * Used to synchronize between migration main thread and return
         * path thread. The migration thread can wait() on this sem, while
         * other threads (e.g., return path thread) can kick it using a
         * post().
         */
        QemuSemaphore rp_sem;
        /*
         * We post to this when we get one PONG from dest. So far it's an
         * easy way to know the main channel has been successfully
         * established on dest QEMU.
         */
        QemuSemaphore rp_pong_acks;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp when VM is down (ms) to migrate the last stuff */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;

    /*
     * State before stopping the vm by vm_stop_force_state().
     * If migration is interrupted for any reason, we need to continue
     * running the guest on source if it was running, or restore its stopped
     * state.
     */
    RunState vm_old_state;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for guest to unplug device */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify COLO thread to do checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred. We use the mutex to be able to
     * return the first error message.
     */
    Error *error;
    /* mutex to protect the error field above */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination. It is left at false for qemu
     * older than 3.0, since only newer qemu sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;
    /*
     * This variable only affects behavior when postcopy preempt mode is
     * enabled.
     *
     * When set:
     *
     * - postcopy preempt src QEMU instance will generate an EOS message at
     *   the end of migration to shut the preempt channel on dest side.
     *
     * - postcopy preempt channel will be created at the setup phase on src
     *   QEMU.
     *
     * When clear:
     *
     * - postcopy preempt src QEMU instance will _not_ generate an EOS
     *   message at the end of migration; the dest qemu will shut down the
     *   channel itself.
     *
     * - postcopy preempt channel will be created at the switching phase
     *   from precopy -> postcopy (to avoid the race condition of misordered
     *   creation of channels).
     *
     * NOTE: See message-id <ZBoShWArKDPpX/D7@work-vm> on the qemu-devel
     * mailing list for more information on the possible race. Everyone
     * should probably just keep this value untouched after it is set by the
     * machine type (or the default).
     */
    bool preempt_pre_7_2;

    /*
     * Flush every channel after each section sent.
     *
     * This assures that we can't mix pages from one iteration through
     * ram pages with pages for the following iteration. We really
     * only need to do this flush after we have gone through all the
     * dirty pages. For historical reasons, we do that after each
     * section. This is suboptimal (we flush too many times).
     * Default value is false. (since 8.1)
     */
    bool multifd_flush_after_each_section;
    /*
     * This decides the size of guest memory chunk that will be used
     * to track dirty bitmap clearing. The size of memory chunk will
     * be GUEST_PAGE_SIZE << N. Say, N=0 means we will clear dirty
     * bitmap for each page to send (1<<0=1); N=10 means we will clear
     * dirty bitmap only once per 1<<10=1K continuous guest pages
     * (which is a 4M chunk with 4K pages).
     */
    uint8_t clear_bitmap_shift;

    /*
     * This saves the hostname when the outgoing migration starts
     */
    char *hostname;

    /* QEMU_VM_VMDESCRIPTION content filled for all non-iterable devices. */
    JSONWriter *vmdesc;

    /*
     * Indicates whether an ACK from the destination that it's OK to do
     * switchover has been received.
     */
    bool switchover_acked;
    /* Is this an RDMA migration */
    bool rdma_migration;
};

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool migration_has_all_channels(void);

void migrate_set_error(MigrationState *s, const Error *error);
bool migrate_has_error(MigrationState *s);

void migrate_fd_connect(MigrationState *s, Error *error_in);

int migration_call_notifiers(MigrationState *s, MigrationEventType type,
                             Error **errp);

int migrate_init(MigrationState *s, Error **errp);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
bool migration_postcopy_is_alive(int state);
MigrationState *migrate_get_current(void);
bool migration_has_failed(MigrationState *);
bool migrate_mode_is_cpr(MigrationState *);

uint64_t ram_get_total_transferred_pages(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start, uint64_t haddr);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
int migrate_send_rp_switchover_ack(MigrationIncomingState *mis);

void dirty_bitmap_mig_before_vm_start(void);
void dirty_bitmap_mig_cancel_outgoing(void);
void dirty_bitmap_mig_cancel_incoming(void);
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp);

void migrate_add_address(SocketAddress *address);
bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
                       Error **errp);
int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"

void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_bh_schedule(QEMUBHFunc *cb, void *opaque);
void migration_cancel(const Error *error);

void migration_populate_vfio_info(MigrationInfo *info);
void migration_reset_vfio_bytes_transferred(void);
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page);

/*
 * Migration thread waiting for return path thread. Return non-zero if an
 * error is detected.
 */
int migration_rp_wait(MigrationState *s);
/*
 * Kick the migration thread waiting for return path messages. NOTE: the
 * name can be slightly confusing (when read as "kick the rp thread"); just
 * remember the target is always the migration thread.
 */
void migration_rp_kick(MigrationState *s);
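
/*
 * Typical pairing (a sketch inferred from the comments above and on
 * rp_sem, not a prescribed protocol): the migration thread blocks in
 * migration_rp_wait() until the message it cares about has been processed,
 * and the return path thread calls migration_rp_kick() to wake it again.
 */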

#endif