/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

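/*
 * Rough map of this file: postcopy_ram_supported_by_host() probes the
 * host kernel; postcopy_ram_incoming_init() and
 * postcopy_ram_prepare_discard() set up the destination's RAM;
 * postcopy_ram_enable_notify() arms userfaultfd and starts the fault
 * thread; postcopy_place_page()/postcopy_place_page_zero() install pages
 * as they arrive; postcopy_ram_incoming_cleanup() tears it all down.
 * The postcopy_discard_send_*() helpers at the bottom run on the source.
 */
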
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "migration/migration.h"
#include "postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on the size of each discard command; each entry is a
 * (start, length) pair of uint64_t, so 12 entries is 192 bytes of payload,
 * keeping each command around ~200 bytes.
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

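/*
 * Check the kernel's userfaultfd API version and that the ioctls we need
 * are present on the given userfaultfd; returns true if it is usable.
 */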
static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = api_struct.features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_range_shared(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length, void *opaque)
{
    if (qemu_ram_is_shared(qemu_ram_block_by_name(block_name))) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_range_shared, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque is unused (the block iterator is called with NULL).
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification on accesses to
 * pages that haven't yet been written.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}

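/*
 * Note: UFFDIO_REGISTER fills reg_struct.ioctls with the operations valid
 * for that particular range, which can vary with the backing memory type
 * (e.g. hugetlbfs needs kernel support), hence the per-region COPY check
 * above in addition to the global probe in ufd_version_check().
 */
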
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

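/*
 * Note on the flow: a thread that faults on a missing page stays blocked
 * in the kernel until postcopy_place_page() later maps that page with
 * UFFDIO_COPY, which also wakes it.  The userfault fd is opened
 * O_NONBLOCK below (unlike the probe in postcopy_ram_supported_by_host)
 * so the fault thread can poll() it together with the quit eventfd and
 * safely retry on EAGAIN after a spurious wakeup.
 */
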
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

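/*
 * The atomicity above matters: a vCPU may be stalled on this very page,
 * so it must never observe a partially-filled page.  UFFDIO_COPY fills
 * the page from the staging buffer and only then maps it into the guest's
 * address space, which is why incoming pages are staged in the buffer from
 * postcopy_get_tmp_page() rather than written to the guest mapping directly.
 */
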
/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   pagesize);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
    }

    return res;
}

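/*
 * Typical source-side sequence, per RAMBlock: one postcopy_discard_send_init(),
 * any number of postcopy_discard_send_range() calls as the bitmap is walked,
 * then postcopy_discard_send_finish() to flush the remainder and free the PDS.
 */
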
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

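/*
 * Illustrative example (assuming a 4KiB target page size): a call with
 * start=16, length=4 queues the byte range (start=0x10000, length=0x4000);
 * the twelfth queued entry flushes one discard command onto the stream.
 */
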
/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}