#include "qemu/osdep.h"
#include "qemu/rcu.h"
+#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
&pnd);
}
/*
 * NOTE: this routine is not thread safe, we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 *
 * Create a migration thread and synchronously wait until it has finished
 * its early setup: @fn is expected to post mis->thread_sync_sem once it is
 * ready to run.  @mis is passed to @fn as its opaque argument.
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    /* One-shot handshake semaphore; must be (re)initialized per call */
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    /* Block until the new thread signals it has initialized */
    qemu_sem_wait(&mis->thread_sync_sem);
    /* Destroy it so the next postcopy_thread_create() can re-init it */
    qemu_sem_destroy(&mis->thread_sync_sem);
}
+
/* Postcopy needs to detect accesses to pages that haven't yet been copied
* across, and efficiently map new pages in, the techniques for doing this
* are target OS specific.
return false;
}
- if (qemu_real_host_page_size != ram_pagesize_summary()) {
+ if (qemu_real_host_page_size() != ram_pagesize_summary()) {
bool have_hp = false;
/* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
*/
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
- long pagesize = qemu_real_host_page_size;
+ long pagesize = qemu_real_host_page_size();
int ufd = -1;
bool ret = false; /* Error unless we change it */
void *testarea = NULL;
static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
- if (mis->postcopy_tmp_page) {
- munmap(mis->postcopy_tmp_page, mis->largest_page_size);
- mis->postcopy_tmp_page = NULL;
+ int i;
+
+ if (mis->postcopy_tmp_pages) {
+ for (i = 0; i < mis->postcopy_channels; i++) {
+ if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
+ munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
+ mis->largest_page_size);
+ mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
+ }
+ }
+ g_free(mis->postcopy_tmp_pages);
+ mis->postcopy_tmp_pages = NULL;
}
if (mis->postcopy_tmp_zero_page) {
affected_cpu);
}
/*
 * Park the fault thread while the migration channel is broken.  Blocks
 * until postcopy_pause_sem_fault is posted (presumably by the main thread
 * once the channel has been rebuilt — confirm against the caller side);
 * the trace points bracket the paused interval for debugging.
 */
static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}
/*
trace_postcopy_ram_fault_thread_entry();
rcu_register_thread();
mis->last_rb = NULL; /* last RAMBlock we sent part of */
- qemu_sem_post(&mis->fault_thread_sem);
+ qemu_sem_post(&mis->thread_sync_sem);
struct pollfd *pfd;
size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
* broken already using the event. We should hold until
* the channel is rebuilt.
*/
- if (postcopy_pause_fault_thread(mis)) {
- /* Continue to read the userfaultfd */
- } else {
- error_report("%s: paused but don't allow to continue",
- __func__);
- break;
- }
+ postcopy_pause_fault_thread(mis);
}
if (pfd[1].revents) {
msg.arg.pagefault.address);
if (ret) {
/* May be network failure, try to wait for recovery */
- if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
- /* We got reconnected somehow, try to continue */
- goto retry;
- } else {
- /* This is a unavoidable fault */
- error_report("%s: postcopy_request_page() get %d",
- __func__, ret);
- break;
- }
+ postcopy_pause_fault_thread(mis);
+ goto retry;
}
}
/*
 * Allocate one temporary huge-page-sized staging buffer per postcopy
 * channel.  Each buffer is mmap'ed anonymously and must be large enough
 * for the largest RAMBlock page size (mis->largest_page_size).
 * Returns -errno on mmap failure; partially allocated buffers are left
 * for the later cleanup path to release.
 */
static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    PostcopyTmpPage *tmp_page;
    int err, i, channels;
    void *temp_page;

    /* TODO: will be boosted when enable postcopy preemption */
    mis->postcopy_channels = 1;

    channels = mis->postcopy_channels;
    /*
     * NOTE(review): g_malloc0_n() is documented as (n_blocks, n_block_bytes);
     * the arguments here are passed in (size, count) order.  The product —
     * and the overflow check — are the same either way, so this is correct,
     * but consider swapping them to match the documented signature.
     */
    mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);

    for (i = 0; i < channels; i++) {
        tmp_page = &mis->postcopy_tmp_pages[i];
        temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (temp_page == MAP_FAILED) {
            err = errno;
            error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
                         __func__, i, strerror(err));
            /* Clean up will be done later */
            return -err;
        }
        tmp_page->tmp_huge_page = temp_page;
        /* Initialize default states for each tmp page */
        postcopy_temp_page_reset(tmp_page);
    }
/*
return -1;
}
- qemu_sem_init(&mis->fault_thread_sem, 0);
- qemu_thread_create(&mis->fault_thread, "postcopy/fault",
- postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
- qemu_sem_wait(&mis->fault_thread_sem);
- qemu_sem_destroy(&mis->fault_thread_sem);
+ postcopy_thread_create(mis, &mis->fault_thread, "postcopy/fault",
+ postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
mis->have_fault_thread = true;
/* Mark so that we get notified of accesses to unwritten areas */
#endif
/* ------------------------------------------------------------------------- */
+void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
+{
+ tmp_page->target_pages = 0;
+ tmp_page->host_addr = NULL;
+ /*
+ * This is set to true when reset, and cleared as long as we received any
+ * of the non-zero small page within this huge page.
+ */
+ tmp_page->all_zero = true;
+}
void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{