/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
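/*
 * Overview of the incoming side, as implemented below: all of guest RAM is
 * discarded and registered with userfaultfd in MISSING mode; a dedicated
 * fault thread reads fault addresses from the userfault fd and asks the
 * source for those pages over the return path; the pages are then placed
 * with UFFDIO_COPY/UFFDIO_ZEROPAGE, which atomically fill the page and
 * wake the blocked guest thread.
 */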
#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
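/* 12 ranges * (8-byte start + 8-byte length) = 192 bytes of payload per
 * command, plus the command header, which is where the ~200 bytes quoted
 * above comes from.
 */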
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target-OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif
#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
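/*
 * The userfaultfd syscall appeared in Linux 4.3; on kernels/libcs without
 * it, __NR_userfaultfd is undefined and the stubs at the bottom of this
 * file are built instead.
 */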
typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    int64_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    int64_t total_blocktime;
    /* blocktime per vCPU */
    int64_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    int64_t last_begin;
    /* number of vCPUs that are currently suspended */
    int smp_cpus_down;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}
static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(int64_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(int64_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}
static int64List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    int64List *list = NULL, *entry = NULL;
    int i;

    /* Walk from the last vCPU down, prepending each entry, so the list
     * ends up in ascending vCPU order without needing a tail pointer. */
    for (i = smp_cpus - 1; i >= 0; i--) {
        entry = g_new0(int64List, 1);
        entry->value = ctx->vcpu_blocktime[i];
        entry->next = list;
        list = entry;
    }

    return list;
}
/*
 * This function populates MigrationInfo from postcopy's blocktime
 * context. It does nothing unless the postcopy-blocktime capability
 * was set (and hence a blocktime context exists).
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}
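/*
 * The fields set above surface in QMP as the "postcopy-blocktime" and
 * "postcopy-vcpu-blocktime" members of the query-migrate reply on the
 * destination.
 */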
static uint64_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}
/*
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - must have been checked by the caller
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *            in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}
/*
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * UFFD_API can be requested only once per fd, and the feature set the
     * kernel supports is persistent, so probe it just once and cache it.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (migrate_postcopy_blocktime() && mis &&
        UFFD_FEATURE_THREAD_ID & supported_features) {
        /* kernel supports that feature */
        /* don't create blocktime_context if it already exists */
        if (!mis->blocktime_ctx) {
            mis->blocktime_ctx = blocktime_context_new();
        }

        asked_features |= UFFD_FEATURE_THREAD_ID;
    }
#endif

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on every userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}
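/*
 * ufd_check_and_apply() runs twice: once from postcopy_ram_supported_by_host()
 * on a throwaway probe fd, and again from postcopy_ram_enable_notify() on the
 * fd that will really take faults, since each new userfault fd needs its own
 * UFFD_API handshake.
 */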
/*
 * Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}
/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
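/*
 * ram_discard_range() drops the backing pages for the range (an
 * madvise(MADV_DONTNEED)-style discard for anonymous memory), so a later
 * guest access faults into userfaultfd instead of reading stale local data.
 */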
/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64 = 1;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }

        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
        get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}
/*
 * Called when a page fault occurs; starts tracking the time for which the
 * faulting vCPU is blocked.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int64_t now_ms;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (dc->vcpu_addr[cpu] == 0) {
        atomic_inc(&dc->smp_cpus_down);
    }

    atomic_xchg__nocheck(&dc->last_begin, now_ms);
    atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], now_ms);
    atomic_xchg__nocheck(&dc->vcpu_addr[cpu], addr);

    /* check it here, not at the beginning of the function,
     * because the check could occur earlier than bitmap_set in
     * qemu_ufd_copy_ioctl */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        atomic_xchg__nocheck(&dc->vcpu_addr[cpu], 0);
        atomic_xchg__nocheck(&dc->page_fault_vcpu_time[cpu], 0);
        atomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}
/*
 * This function just provides the calculated blocktime per cpu and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is as follows:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int i, affected_cpu = 0;
    int64_t now_ms;
    bool vcpu_total_blocktime = false;
    int64_t read_vcpu_time;

    if (!dc) {
        return;
    }

    now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* Look the CPU up in order to clear it; this linear scan is
     * straightforward but not optimal - a tree or hash keyed by address,
     * with a list of vCPUs per entry, would be faster. */
    for (i = 0; i < smp_cpus; i++) {
        uint64_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg__nocheck(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = now_ms - read_vcpu_time;
        affected_cpu += 1;
        /* we need to know whether mark_postcopy_blocktime_end was called
         * for a faulted page; the other possibility is a prefetched page,
         * in which case we shouldn't count total blocktime here */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the loop, since one page can affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += now_ms - atomic_fetch_add(&dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
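/*
 * Worked example for the scheme above, times in ms: CPU1 faults at t=10,
 * CPU2 at t=12, CPU3 at t=15 (so last_begin = 15) and the page is placed
 * at t=20. Each vCPU's entry in vcpu_blocktime grows by 20 minus its own
 * fault time (10, 8 and 5 respectively), while total_blocktime grows only
 * by 20 - 15 = 5, the interval during which all three were stopped at once.
 */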
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zd",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                msg.arg.pagefault.feat.ptid);

        mark_postcopy_blocktime_begin((uintptr_t)(msg.arg.pagefault.address),
                                      msg.arg.pagefault.feat.ptid, rb);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
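/*
 * Note the start-up handshake above: the fault thread posts
 * fault_thread_sem as soon as it is running, and we wait on it before
 * registering the RAM blocks, so every fault raised after registration
 * has a live consumer.
 */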
static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}
/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    trace_postcopy_place_page_zero(host);

    if (qemu_ram_pagesize(rb) == getpagesize()) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize(),
                                rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }

    return 0;
}
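/*
 * The temporary zero page is allocated lazily, zeroed once, and then
 * reused for every zeroed huge page: it costs largest_page_size bytes of
 * memory but avoids a memset per placed page.
 */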
/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away and replaces it with the page being placed.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif
/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;

    return res;
}
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}
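/*
 * Typical lifecycle, as driven by the RAM save code on the source (a
 * sketch; block/page names are illustrative):
 *
 *     PostcopyDiscardState *pds =
 *         postcopy_discard_send_init(ms, block->idstr);
 *     postcopy_discard_send_range(ms, pds, first_dirty_page, run_length);
 *     ...more ranges...
 *     postcopy_discard_send_finish(ms, pds);   (flushes and frees pds)
 */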
/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}