]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * QEMU live migration | |
3 | * | |
4 | * Copyright IBM, Corp. 2008 | |
5 | * | |
6 | * Authors: | |
7 | * Anthony Liguori <aliguori@us.ibm.com> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
13 | ||
14 | #ifndef QEMU_MIGRATION_H | |
15 | #define QEMU_MIGRATION_H | |
16 | ||
17 | #include "exec/cpu-common.h" | |
18 | #include "hw/qdev-core.h" | |
19 | #include "qapi/qapi-types-migration.h" | |
20 | #include "qemu/thread.h" | |
21 | #include "qemu/coroutine_int.h" | |
22 | #include "io/channel.h" | |
23 | #include "net/announce.h" | |
24 | ||
/*
 * Opaque per-incoming-migration context used to account how long vCPUs
 * are blocked on postcopy page faults; defined in the postcopy code.
 */
struct PostcopyBlocktimeContext;

/* Value carried in a RESUME_ACK return-path message on successful resume */
#define MIGRATION_RESUME_ACK_VALUE (1)

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us
 * the benefit that all the chunks are 64 pages aligned then the
 * bitmaps are always aligned to LONG.
 */
#define CLEAR_BITMAP_SHIFT_MIN 6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
 * default value to use if no one specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT 18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
 * big enough and make sure we won't overflow easily.
 */
#define CLEAR_BITMAP_SHIFT_MAX 31
45 | ||
46 | /* State for the incoming migration */ | |
/* State for the incoming migration (i.e. this QEMU is the destination) */
struct MigrationIncomingState {
    /* Stream over which device/RAM state arrives from the source */
    QEMUFile *from_src_file;

    /*
     * Free at the start of the main state load, set as the main thread finishes
     * loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer announce_timer;

    /* Largest host page size across all RAMBlocks of the incoming guest */
    size_t largest_page_size;
    /* True while fault_thread exists and must be joined */
    bool have_fault_thread;
    /* Postcopy thread that services userfault events from the kernel */
    QemuThread fault_thread;
    QemuSemaphore fault_thread_sem;
    /* Set this when we want the fault thread to quit */
    bool fault_thread_quit;

    /* True while listen_thread exists and must be joined */
    bool have_listen_thread;
    /* Thread that keeps loading the remaining stream during postcopy */
    QemuThread listen_thread;
    QemuSemaphore listen_thread_sem;

    /* For the kernel to send us notifications */
    int userfault_fd;
    /* To notify the fault_thread to wake, e.g., when need to quit */
    int userfault_event_fd;
    /* Return-path stream back to the source (e.g. page requests, acks) */
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    /* Scratch pages used to place incoming postcopy pages atomically */
    void     *postcopy_tmp_page;
    void     *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray   *postcopy_remote_fds;

    /* Bottom half that finishes incoming-migration processing */
    QEMUBH *bh;

    /* Current MigrationStatus value (MIGRATION_STATUS_*) */
    int state;

    bool have_colo_incoming_thread;
    QemuThread colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine *migration_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext to keep information for postcopy
     * live migration, to calculate vCPU block time
     * */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* notify PAUSED postcopy incoming migrations to try to continue */
    bool postcopy_recover_triggered;
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;

    /* List of listening socket addresses  */
    SocketAddressList *socket_address_list;
};
107 | ||
/* Accessor for the singleton incoming-migration state (lazily created) */
MigrationIncomingState *migration_incoming_get_current(void);
/* Tear down the singleton incoming state and its resources */
void migration_incoming_state_destroy(void);
/*
 * Functions to work with blocktime context
 */
/* Fill @info with destination-side postcopy blocktime statistics */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

/* QOM type name for the migration object */
#define TYPE_MIGRATION "migration"

/* Standard QOM cast/check macros for the migration object and class */
#define MIGRATION_CLASS(klass) \
    OBJECT_CLASS_CHECK(MigrationClass, (klass), TYPE_MIGRATION)
#define MIGRATION_OBJ(obj) \
    OBJECT_CHECK(MigrationState, (obj), TYPE_MIGRATION)
#define MIGRATION_GET_CLASS(obj) \
    OBJECT_GET_CLASS(MigrationClass, (obj), TYPE_MIGRATION)
123 | ||
/* QOM class for TYPE_MIGRATION; adds nothing beyond the DeviceClass parent */
typedef struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
} MigrationClass;
128 | ||
/* State for the outgoing migration (i.e. this QEMU is the source) */
struct MigrationState
{
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    /* Main migration thread pushing state to the destination */
    QemuThread thread;
    /* Bottom half running final cleanup on the main loop */
    QEMUBH *cleanup_bh;
    /* Stream carrying device/RAM state to the destination */
    QEMUFile *to_dst_file;
    /*
     * Protects to_dst_file pointer.  We need to make sure we won't
     * yield or hang during the critical section, since this lock will
     * be used in OOB command handler.
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already send at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already send at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    /* Current MigrationStatus value (MIGRATION_STATUS_*) */
    int state;

    /* State related to return path */
    struct {
        /* Stream of messages coming back from the destination */
        QEMUFile     *from_dst_file;
        /* Thread draining the return-path stream */
        QemuThread    rp_thread;
        /* Set when the return path hits an error */
        bool          error;
        QemuSemaphore rp_sem;
    } rp_state;

    /* Measured throughput of the most recent transfer window */
    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp when VM is down (ms) to migrate the last stuff */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    /* Indexed by MigrationCapability; set via migrate-set-capabilities */
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;
    /*
     * Whether guest was running when we enter the completion stage.
     * If migration is interrupted by any reason, we need to continue
     * running the guest on source.
     */
    bool vm_was_running;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for guest to unplug device */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify COLO thread to do checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /* The first error that has occurred.
       We used the mutex to be able to return the 1st error message */
    Error *error;
    /* mutex to protect errp */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    QemuSemaphore postcopy_pause_rp_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination. It is left at false for qemu
     * older than 3.0, since only newer qemu sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;

    /*
     * This decides the size of guest memory chunk that will be used
     * to track dirty bitmap clearing.  The size of memory chunk will
     * be GUEST_PAGE_SIZE << N.  Say, N=0 means we will clear dirty
     * bitmap for each page to send (1<<0=1); N=10 means we will clear
     * dirty bitmap only once for 1<<10=1K continuous guest pages
     * (which is in 4M chunk).
     */
    uint8_t clear_bitmap_shift;
};
265 | ||
/* Transition *state from old_state to new_state (atomic compare-and-swap) */
void migrate_set_state(int *state, int old_state, int new_state);

/* Entry points feeding an established incoming channel into migration */
void migration_fd_process_incoming(QEMUFile *f, Error **errp);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

/* True once every expected incoming channel (e.g. multifd) has connected */
bool migration_has_all_channels(void);

/* Maximum tolerated downtime, from the migration parameters */
uint64_t migrate_max_downtime(void);

/* Record the first migration error (later errors are dropped) */
void migrate_set_error(MigrationState *s, const Error *error);
/* Record an error and move the migration to the FAILED state */
void migrate_fd_error(MigrationState *s, const Error *error);

/* Start (or resume) an outgoing migration on a connected channel */
void migrate_fd_connect(MigrationState *s, Error *error_in);

/* Predicates over a MigrationStatus value */
bool migration_is_setup_or_active(int state);
bool migration_is_running(int state);

/* Reset per-run fields of @s before a new outgoing migration */
void migrate_init(MigrationState *s);
/* True if a migration blocker is registered; sets *errp with the reason */
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
/* Accessor for the singleton outgoing-migration state */
MigrationState *migrate_get_current(void);

bool migrate_postcopy(void);

/* Capability accessors (enabled_capabilities[]) */
bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
bool migrate_dirty_bitmaps(void);
bool migrate_ignore_shared(void);
bool migrate_validate_uuid(void);

bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
/* Parameter accessors (MigrationParameters) */
int migrate_multifd_channels(void);
MultiFDCompression migrate_multifd_compression(void);
int migrate_multifd_zlib_level(void);
int migrate_multifd_zstd_level(void);

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);

uint64_t ram_get_total_transferred_pages(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_compress_wait_thread(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
bool migrate_postcopy_blocktime(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
                              ram_addr_t start, size_t len);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);

void dirty_bitmap_mig_before_vm_start(void);
void init_dirty_bitmap_incoming_migration(void);
/* Remember @address as an extra listening address for incoming migration */
void migrate_add_address(SocketAddress *address);

/* Iterate RAMBlocks that are not excluded by the ignore-shared capability */
int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

/*
 * Deliberate poison: any use of qemu_ram_foreach_block in migration
 * code expands to an invalid token sequence, forcing a build failure
 * that points the author at foreach_not_ignored_block instead.
 */
#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"

/* Rate-limiting override hooks for urgent (e.g. postcopy) requests */
void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
/* Returns true if the caller should throttle; also services urgent reqs */
bool migration_rate_limit(void);

#endif