diff --git a/migration.c b/migration.c
index ef171d2b39cf3421f9eac7a682e2a74af45967de..3eb0fad147a40f25dde8cfaf1b9767775cbbb8e5 100644
--- a/migration.c
+++ b/migration.c
  */
 
 #include "qemu-common.h"
-#include "migration.h"
-#include "monitor.h"
-#include "buffered_file.h"
-#include "sysemu.h"
-#include "block.h"
-#include "qemu_socket.h"
-#include "block-migration.h"
+#include "migration/migration.h"
+#include "monitor/monitor.h"
+#include "migration/qemu-file.h"
+#include "sysemu/sysemu.h"
+#include "block/block.h"
+#include "qemu/sockets.h"
+#include "migration/block.h"
+#include "qemu/thread.h"
 #include "qmp-commands.h"
+#include "trace.h"
 
 //#define DEBUG_MIGRATION
 
@@ -43,6 +45,11 @@ enum {
 
 #define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
 
+/* Amount of time to allocate to each "chunk" of bandwidth-throttled
+ * data. */
+#define BUFFER_DELAY     100
+#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
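/*
 * Note (not part of the patch): a worked example of how these defines
 * combine.  XFER_LIMIT_RATIO is 1000 / BUFFER_DELAY = 10, so a bandwidth
 * limit given in bytes per second is divided by 10 before being handed to
 * qemu_file_set_rate_limit(), yielding the byte budget for one 100 ms
 * buffering interval.  For a limit equal to MAX_THROTTLE:
 *
 *     (32 << 20) / XFER_LIMIT_RATIO = 33554432 / 10 = 3355443 bytes/100 ms
 *
 * The migration thread writes at most that much, then sleeps until the
 * next BUFFER_DELAY tick (see migration_thread() below).
 */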
+
 /* Migration XBZRLE default cache size */
 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
 
@@ -64,33 +71,35 @@ MigrationState *migrate_get_current(void)
     return &current_migration;
 }
 
-int qemu_start_incoming_migration(const char *uri, Error **errp)
+void qemu_start_incoming_migration(const char *uri, Error **errp)
 {
     const char *p;
-    int ret;
 
     if (strstart(uri, "tcp:", &p))
-        ret = tcp_start_incoming_migration(p, errp);
+        tcp_start_incoming_migration(p, errp);
 #if !defined(WIN32)
     else if (strstart(uri, "exec:", &p))
-        ret =  exec_start_incoming_migration(p);
+        exec_start_incoming_migration(p, errp);
     else if (strstart(uri, "unix:", &p))
-        ret = unix_start_incoming_migration(p, errp);
+        unix_start_incoming_migration(p, errp);
     else if (strstart(uri, "fd:", &p))
-        ret = fd_start_incoming_migration(p);
+        fd_start_incoming_migration(p, errp);
 #endif
     else {
-        fprintf(stderr, "unknown migration protocol: %s\n", uri);
-        ret = -EPROTONOSUPPORT;
+        error_setg(errp, "unknown migration protocol: %s", uri);
     }
-    return ret;
 }
 
-void process_incoming_migration(QEMUFile *f)
+static void process_incoming_migration_co(void *opaque)
 {
-    if (qemu_loadvm_state(f) < 0) {
+    QEMUFile *f = opaque;
+    int ret;
+
+    ret = qemu_loadvm_state(f);
+    qemu_fclose(f);
+    if (ret < 0) {
         fprintf(stderr, "load of migration failed\n");
-        exit(0);
+        exit(EXIT_FAILURE);
     }
     qemu_announce_self();
     DPRINTF("successfully loaded vm state\n");
@@ -102,10 +111,20 @@ void process_incoming_migration(QEMUFile *f)
     if (autostart) {
         vm_start();
     } else {
-        runstate_set(RUN_STATE_PRELAUNCH);
+        runstate_set(RUN_STATE_PAUSED);
     }
 }
 
+void process_incoming_migration(QEMUFile *f)
+{
+    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
+    int fd = qemu_get_fd(f);
+
+    assert(fd != -1);
+    qemu_set_nonblock(fd);
+    qemu_coroutine_enter(co, f);
+}
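/*
 * Note (not part of the patch): process_incoming_migration() now only
 * marks the source fd non-blocking and enters a coroutine; the actual
 * qemu_loadvm_state() call runs in process_incoming_migration_co() above,
 * so a read that would block can yield back to the main loop instead of
 * stalling it until more data arrives.
 */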
+
 /* amount of nanoseconds we are willing to wait for migration to be down.
  * the choice of nanoseconds is because it is the maximum resolution that
  * get_clock() can achieve. It is an internal measure. All user-visible
@@ -178,11 +197,11 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->remaining = ram_bytes_remaining();
         info->ram->total = ram_bytes_total();
         info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
         info->ram->normal = norm_mig_pages_transferred();
         info->ram->normal_bytes = norm_mig_bytes_transferred();
         info->ram->dirty_pages_rate = s->dirty_pages_rate;
 
-
         if (blk_mig_active()) {
             info->has_disk = true;
             info->disk = g_malloc0(sizeof(*info->disk));
@@ -208,6 +227,7 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->remaining = 0;
         info->ram->total = ram_bytes_total();
         info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
         info->ram->normal = norm_mig_pages_transferred();
         info->ram->normal_bytes = norm_mig_bytes_transferred();
         break;
@@ -242,162 +262,54 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
 
 /* shared migration helpers */
 
-static int migrate_fd_cleanup(MigrationState *s)
+static void migrate_fd_cleanup(void *opaque)
 {
-    int ret = 0;
+    MigrationState *s = opaque;
 
-    if (s->fd != -1) {
-        qemu_set_fd_handler2(s->fd, NULL, NULL, NULL, NULL);
-    }
+    qemu_bh_delete(s->cleanup_bh);
+    s->cleanup_bh = NULL;
 
     if (s->file) {
         DPRINTF("closing file\n");
-        ret = qemu_fclose(s->file);
-        s->file = NULL;
-    }
+        qemu_mutex_unlock_iothread();
+        qemu_thread_join(&s->thread);
+        qemu_mutex_lock_iothread();
 
-    if (s->fd != -1) {
-        close(s->fd);
-        s->fd = -1;
+        qemu_fclose(s->file);
+        s->file = NULL;
     }
 
-    return ret;
-}
+    assert(s->state != MIG_STATE_ACTIVE);
 
-void migrate_fd_error(MigrationState *s)
-{
-    DPRINTF("setting error state\n");
-    s->state = MIG_STATE_ERROR;
-    notifier_list_notify(&migration_state_notifiers, s);
-    migrate_fd_cleanup(s);
-}
-
-static void migrate_fd_completed(MigrationState *s)
-{
-    DPRINTF("setting completed state\n");
-    if (migrate_fd_cleanup(s) < 0) {
-        s->state = MIG_STATE_ERROR;
-    } else {
-        s->state = MIG_STATE_COMPLETED;
-        runstate_set(RUN_STATE_POSTMIGRATE);
+    if (s->state != MIG_STATE_COMPLETED) {
+        qemu_savevm_state_cancel();
     }
-    notifier_list_notify(&migration_state_notifiers, s);
-}
-
-static void migrate_fd_put_notify(void *opaque)
-{
-    MigrationState *s = opaque;
-    int ret;
 
-    qemu_set_fd_handler2(s->fd, NULL, NULL, NULL, NULL);
-    ret = qemu_file_put_notify(s->file);
-    if (ret) {
-        migrate_fd_error(s);
-    }
+    notifier_list_notify(&migration_state_notifiers, s);
 }
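/*
 * Note (not part of the patch): cleanup now runs in a bottom half that the
 * migration thread schedules when it finishes.  The BH executes in the
 * main loop with the iothread lock held; the lock is dropped around
 * qemu_thread_join(), presumably so the migration thread, which also takes
 * that lock near the end of migration_thread(), can finish and exit before
 * the file is closed.
 */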
 
-ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
-                              size_t size)
+static void migrate_finish_set_state(MigrationState *s, int new_state)
 {
-    ssize_t ret;
-
-    if (s->state != MIG_STATE_ACTIVE) {
-        return -EIO;
+    if (__sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE,
+                                    new_state) == new_state) {
+        trace_migrate_set_state(new_state);
     }
-
-    do {
-        ret = s->write(s, data, size);
-    } while (ret == -1 && ((s->get_error(s)) == EINTR));
-
-    if (ret == -1)
-        ret = -(s->get_error(s));
-
-    if (ret == -EAGAIN) {
-        qemu_set_fd_handler2(s->fd, NULL, NULL, migrate_fd_put_notify, s);
-    }
-
-    return ret;
 }
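/*
 * Note (not part of the patch): migrate_finish_set_state() uses an atomic
 * compare-and-swap because s->state is now written from two threads: the
 * migration thread (on completion or error) and the main loop via
 * migrate_fd_cancel() below.  Only a transition out of MIG_STATE_ACTIVE
 * can take effect.
 */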
 
-void migrate_fd_put_ready(MigrationState *s)
+void migrate_fd_error(MigrationState *s)
 {
-    int ret;
-
-    if (s->state != MIG_STATE_ACTIVE) {
-        DPRINTF("put_ready returning because of non-active state\n");
-        return;
-    }
-
-    DPRINTF("iterate\n");
-    ret = qemu_savevm_state_iterate(s->file);
-    if (ret < 0) {
-        migrate_fd_error(s);
-    } else if (ret == 1) {
-        int old_vm_running = runstate_is_running();
-        int64_t start_time, end_time;
-
-        DPRINTF("done iterating\n");
-        start_time = qemu_get_clock_ms(rt_clock);
-        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
-        vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
-
-        if (qemu_savevm_state_complete(s->file) < 0) {
-            migrate_fd_error(s);
-        } else {
-            migrate_fd_completed(s);
-        }
-        end_time = qemu_get_clock_ms(rt_clock);
-        s->total_time = end_time - s->total_time;
-        s->downtime = end_time - start_time;
-        if (s->state != MIG_STATE_COMPLETED) {
-            if (old_vm_running) {
-                vm_start();
-            }
-        }
-    }
+    DPRINTF("setting error state\n");
+    assert(s->file == NULL);
+    s->state = MIG_STATE_ERROR;
+    trace_migrate_set_state(MIG_STATE_ERROR);
+    notifier_list_notify(&migration_state_notifiers, s);
 }
 
 static void migrate_fd_cancel(MigrationState *s)
 {
-    if (s->state != MIG_STATE_ACTIVE)
-        return;
-
     DPRINTF("cancelling migration\n");
 
-    s->state = MIG_STATE_CANCELLED;
-    notifier_list_notify(&migration_state_notifiers, s);
-    qemu_savevm_state_cancel(s->file);
-
-    migrate_fd_cleanup(s);
-}
-
-int migrate_fd_wait_for_unfreeze(MigrationState *s)
-{
-    int ret;
-
-    DPRINTF("wait for unfreeze\n");
-    if (s->state != MIG_STATE_ACTIVE)
-        return -EINVAL;
-
-    do {
-        fd_set wfds;
-
-        FD_ZERO(&wfds);
-        FD_SET(s->fd, &wfds);
-
-        ret = select(s->fd + 1, NULL, &wfds, NULL, NULL);
-    } while (ret == -1 && (s->get_error(s)) == EINTR);
-
-    if (ret == -1) {
-        return -s->get_error(s);
-    }
-    return 0;
-}
-
-int migrate_fd_close(MigrationState *s)
-{
-    qemu_set_fd_handler2(s->fd, NULL, NULL, NULL, NULL);
-    return s->close(s);
+    migrate_finish_set_state(s, MIG_STATE_CANCELLED);
 }
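/*
 * Note (not part of the patch): cancelling no longer tears the migration
 * down here.  Flipping the state to MIG_STATE_CANCELLED makes the
 * migration thread's main loop exit, and the cleanup bottom half
 * (migrate_fd_cleanup() above) then closes the file and joins the thread.
 */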
 
 void add_migration_state_change_notifier(Notifier *notify)
@@ -426,23 +338,6 @@ bool migration_has_failed(MigrationState *s)
             s->state == MIG_STATE_ERROR);
 }
 
-void migrate_fd_connect(MigrationState *s)
-{
-    int ret;
-
-    s->state = MIG_STATE_ACTIVE;
-    s->file = qemu_fopen_ops_buffered(s);
-
-    DPRINTF("beginning savevm\n");
-    ret = qemu_savevm_state_begin(s->file, &s->params);
-    if (ret < 0) {
-        DPRINTF("failed, %d\n", ret);
-        migrate_fd_error(s);
-        return;
-    }
-    migrate_fd_put_ready(s);
-}
-
 static MigrationState *migrate_init(const MigrationParams *params)
 {
     MigrationState *s = migrate_get_current();
@@ -462,8 +357,9 @@ static MigrationState *migrate_init(const MigrationParams *params)
 
     s->bandwidth_limit = bandwidth_limit;
     s->state = MIG_STATE_SETUP;
-    s->total_time = qemu_get_clock_ms(rt_clock);
+    trace_migrate_set_state(MIG_STATE_SETUP);
 
+    s->total_time = qemu_get_clock_ms(rt_clock);
     return s;
 }
 
@@ -527,8 +423,6 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
         error_propagate(errp, local_err);
         return;
     }
-
-    notifier_list_notify(&migration_state_notifiers, s);
 }
 
 void qmp_migrate_cancel(Error **errp)
@@ -562,10 +456,15 @@ void qmp_migrate_set_speed(int64_t value, Error **errp)
     if (value < 0) {
         value = 0;
     }
+    if (value > SIZE_MAX) {
+        value = SIZE_MAX;
+    }
 
     s = migrate_get_current();
     s->bandwidth_limit = value;
-    qemu_file_set_rate_limit(s->file, s->bandwidth_limit);
+    if (s->file) {
+        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
+    }
 }
 
 void qmp_migrate_set_downtime(double value, Error **errp)
@@ -592,3 +491,111 @@ int64_t migrate_xbzrle_cache_size(void)
 
     return s->xbzrle_cache_size;
 }
+
+/* migration thread support */
+
+static void *migration_thread(void *opaque)
+{
+    MigrationState *s = opaque;
+    int64_t initial_time = qemu_get_clock_ms(rt_clock);
+    int64_t sleep_time = 0;
+    int64_t initial_bytes = 0;
+    int64_t max_size = 0;
+    int64_t start_time = initial_time;
+    bool old_vm_running = false;
+
+    DPRINTF("beginning savevm\n");
+    qemu_savevm_state_begin(s->file, &s->params);
+
+    while (s->state == MIG_STATE_ACTIVE) {
+        int64_t current_time;
+        uint64_t pending_size;
+
+        if (!qemu_file_rate_limit(s->file)) {
+            DPRINTF("iterate\n");
+            pending_size = qemu_savevm_state_pending(s->file, max_size);
+            DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
+            if (pending_size && pending_size >= max_size) {
+                qemu_savevm_state_iterate(s->file);
+            } else {
+                DPRINTF("done iterating\n");
+                qemu_mutex_lock_iothread();
+                start_time = qemu_get_clock_ms(rt_clock);
+                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
+                old_vm_running = runstate_is_running();
+                vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+                qemu_file_set_rate_limit(s->file, INT_MAX);
+                qemu_savevm_state_complete(s->file);
+                qemu_mutex_unlock_iothread();
+                if (!qemu_file_get_error(s->file)) {
+                    migrate_finish_set_state(s, MIG_STATE_COMPLETED);
+                    break;
+                }
+            }
+        }
+
+        if (qemu_file_get_error(s->file)) {
+            migrate_finish_set_state(s, MIG_STATE_ERROR);
+            break;
+        }
+        current_time = qemu_get_clock_ms(rt_clock);
+        if (current_time >= initial_time + BUFFER_DELAY) {
+            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
+            uint64_t time_spent = current_time - initial_time - sleep_time;
+            double bandwidth = transferred_bytes / time_spent;
+            max_size = bandwidth * migrate_max_downtime() / 1000000;
+
+            DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
+                    " bandwidth %g max_size %" PRId64 "\n",
+                    transferred_bytes, time_spent, bandwidth, max_size);
+            /* if we haven't sent anything, we don't want to recalculate
+               10000 is a small enough number for our purposes */
+            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
+                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
+            }
+
+            qemu_file_reset_rate_limit(s->file);
+            sleep_time = 0;
+            initial_time = current_time;
+            initial_bytes = qemu_ftell(s->file);
+        }
+        if (qemu_file_rate_limit(s->file)) {
+            /* usleep expects microseconds */
+            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
+            sleep_time += qemu_get_clock_ms(rt_clock) - current_time;
+        }
+    }
+
+    qemu_mutex_lock_iothread();
+    if (s->state == MIG_STATE_COMPLETED) {
+        int64_t end_time = qemu_get_clock_ms(rt_clock);
+        s->total_time = end_time - s->total_time;
+        s->downtime = end_time - start_time;
+        runstate_set(RUN_STATE_POSTMIGRATE);
+    } else {
+        if (old_vm_running) {
+            vm_start();
+        }
+    }
+    qemu_bh_schedule(s->cleanup_bh);
+    qemu_mutex_unlock_iothread();
+
+    return NULL;
+}
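/*
 * Note (not part of the patch): a worked example of the completion test
 * above, with made-up numbers.  bandwidth is measured in bytes per
 * millisecond and migrate_max_downtime() returns nanoseconds, so
 *
 *     max_size = bandwidth * migrate_max_downtime() / 1000000
 *
 * is the number of bytes that can still be sent within the allowed
 * downtime.  If 10 MiB were transferred in the last 100 ms interval
 * (bandwidth = 10485760 / 100 = 104857.6 bytes/ms, roughly 100 MiB/s) and
 * the allowed downtime is 300 ms (300000000 ns):
 *
 *     max_size = 104857.6 * 300000000 / 1000000 = 31457280 bytes (30 MiB)
 *
 * The VM is only stopped and qemu_savevm_state_complete() called once
 * qemu_savevm_state_pending() reports less than that amount outstanding.
 */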
+
+void migrate_fd_connect(MigrationState *s)
+{
+    s->state = MIG_STATE_ACTIVE;
+    trace_migrate_set_state(MIG_STATE_ACTIVE);
+
+    /* This is a best 1st approximation. ns to ms */
+    s->expected_downtime = max_downtime/1000000;
+    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
+
+    qemu_file_set_rate_limit(s->file,
+                             s->bandwidth_limit / XFER_LIMIT_RATIO);
+
+    qemu_thread_create(&s->thread, migration_thread, s,
+                       QEMU_THREAD_JOINABLE);
+    notifier_list_notify(&migration_state_notifiers, s);
+}
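/*
 * Note (not part of the patch): migrate_fd_connect() is now the entire
 * hand-off to the migration thread.  The fd-callback machinery removed
 * above (migrate_fd_put_buffer(), migrate_fd_put_ready(),
 * migrate_fd_wait_for_unfreeze()) is replaced by migration_thread(), which
 * drives the begin/iterate/complete cycle itself.  The thread is created
 * joinable so migrate_fd_cleanup() can wait for it, and the rate limit set
 * on the QEMUFile is the per-BUFFER_DELAY budget, bandwidth_limit /
 * XFER_LIMIT_RATIO.
 */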