diff --git a/block-migration.c b/block-migration.c
index d62a8b80bac428bb9a13780991db3f4196fda435..daf9ec1eab77cfadc35cf70d8ea369934f1c0f29 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -29,6 +29,7 @@
 #define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
 #define BLK_MIG_FLAG_EOS                0x02
 #define BLK_MIG_FLAG_PROGRESS           0x04
+#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
 
 #define MAX_IS_ALLOCATED_SEARCH 65536
 
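
The new flag, like the existing ones, rides in the low bits of the 64-bit
chunk header written by blk_send(): the chunk's sector number is shifted
left by BDRV_SECTOR_BITS (9, i.e. 512-byte sectors), which leaves the
bottom nine bits free for flags. A standalone sketch of that encoding
(illustrative only, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define BDRV_SECTOR_BITS 9   /* 512-byte sectors, as in QEMU */

    static uint64_t pack_chunk_header(int64_t sector, uint64_t flags)
    {
        /* Flags must fit in the bits freed by the shift. */
        assert(flags < (1u << BDRV_SECTOR_BITS));
        return ((uint64_t)sector << BDRV_SECTOR_BITS) | flags;
    }

    int main(void)
    {
        /* A chunk at sector 2048 carrying BLK_MIG_FLAG_ZERO_BLOCK. */
        assert(pack_chunk_header(2048, 0x08) == ((2048ull << 9) | 0x08));
        return 0;
    }
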
@@ -54,7 +55,7 @@ typedef struct BlkMigDevState {
     int64_t cur_sector;
     int64_t cur_dirty;
 
-    /* Protected by iothread lock.  */
+    /* Protected by block migration lock.  */
     unsigned long *aio_bitmap;
     int64_t completed_sectors;
 } BlkMigDevState;
@@ -69,7 +70,7 @@ typedef struct BlkMigBlock {
     QEMUIOVector qiov;
     BlockDriverAIOCB *aiocb;
 
-    /* Protected by iothread lock.  */
+    /* Protected by block migration lock.  */
     int ret;
     QSIMPLEQ_ENTRY(BlkMigBlock) entry;
 } BlkMigBlock;
@@ -80,8 +81,9 @@ typedef struct BlkMigState {
     int shared_base;
     QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
     int64_t total_sector_sum;
+    bool zero_blocks;
 
-    /* Protected by iothread lock.  */
+    /* Protected by block migration lock.  */
     QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
     int submitted;
     int read_done;
@@ -90,23 +92,54 @@ typedef struct BlkMigState {
     int transferred;
     int prev_progress;
     int bulk_completed;
+
+    /* Lock must be taken _inside_ the iothread lock.  */
+    QemuMutex lock;
 } BlkMigState;
 
 static BlkMigState block_mig_state;
 
+static void blk_mig_lock(void)
+{
+    qemu_mutex_lock(&block_mig_state.lock);
+}
+
+static void blk_mig_unlock(void)
+{
+    qemu_mutex_unlock(&block_mig_state.lock);
+}
+
+/* Must run outside of the iothread lock during the bulk phase,
+ * or the VM will stall.
+ */
+
 static void blk_send(QEMUFile *f, BlkMigBlock * blk)
 {
     int len;
+    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
+
+    if (block_mig_state.zero_blocks &&
+        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
+    }
 
     /* sector number and flags */
     qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
-                     | BLK_MIG_FLAG_DEVICE_BLOCK);
+                     | flags);
 
     /* device name */
     len = strlen(blk->bmds->bs->device_name);
     qemu_put_byte(f, len);
     qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);
 
+    /* If a block is zero we need to flush here since the network
+     * bandwidth is now a lot higher than the storage device bandwidth.
+     * Thus, if we queue zero blocks we slow down the migration.  */
+    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+        qemu_fflush(f);
+        return;
+    }
+
     qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
 }
 
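
The comment on the new mutex above ("Lock must be taken _inside_ the
iothread lock") fixes a lock hierarchy: a thread that needs both locks
must always take the iothread lock first. A standalone pthread sketch of
that discipline, with hypothetical names (not QEMU code):

    #include <pthread.h>

    static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t blk_mig_lock_ = PTHREAD_MUTEX_INITIALIZER;

    static void touch_shared_state(void)
    {
        pthread_mutex_lock(&iothread_lock);     /* outer lock first */
        pthread_mutex_lock(&blk_mig_lock_);     /* inner lock second */
        /* ... counters such as submitted/read_done live here ... */
        pthread_mutex_unlock(&blk_mig_lock_);   /* release in reverse order */
        pthread_mutex_unlock(&iothread_lock);
    }

    int main(void)
    {
        touch_shared_state();
        return 0;
    }

Two threads that nest the locks in opposite orders can deadlock; keeping
one global order makes that impossible.
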
@@ -120,9 +153,11 @@ uint64_t blk_mig_bytes_transferred(void)
     BlkMigDevState *bmds;
     uint64_t sum = 0;
 
+    blk_mig_lock();
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
         sum += bmds->completed_sectors;
     }
+    blk_mig_unlock();
     return sum << BDRV_SECTOR_BITS;
 }
 
@@ -142,6 +177,9 @@ uint64_t blk_mig_bytes_total(void)
     return sum << BDRV_SECTOR_BITS;
 }
 
+
+/* Called with migration lock held.  */
+
 static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
 {
     int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
@@ -154,6 +192,8 @@ static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
     }
 }
 
+/* Called with migration lock held.  */
+
 static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                              int nb_sectors, int set)
 {
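
bmds_aio_inflight() and bmds_set_aio_inflight() index aio_bitmap with one
bit per dirty chunk (BLOCK_SIZE is 1 MiB here, i.e. 2048 sectors per
chunk). A standalone sketch of that indexing, simplified to a single
sector instead of a sector range (not QEMU code):

    #include <assert.h>
    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SECTORS_PER_CHUNK 2048   /* BDRV_SECTORS_PER_DIRTY_CHUNK */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static bool chunk_in_flight(const unsigned long *bitmap, int64_t sector)
    {
        int64_t chunk = sector / SECTORS_PER_CHUNK;
        return bitmap[chunk / BITS_PER_LONG] & (1UL << (chunk % BITS_PER_LONG));
    }

    static void set_chunk_in_flight(unsigned long *bitmap, int64_t sector,
                                    bool set)
    {
        int64_t chunk = sector / SECTORS_PER_CHUNK;
        unsigned long mask = 1UL << (chunk % BITS_PER_LONG);

        if (set) {
            bitmap[chunk / BITS_PER_LONG] |= mask;
        } else {
            bitmap[chunk / BITS_PER_LONG] &= ~mask;
        }
    }

    int main(void)
    {
        unsigned long bitmap[4] = { 0 };

        set_chunk_in_flight(bitmap, 4096, true);   /* marks chunk 2 */
        assert(chunk_in_flight(bitmap, 4097));     /* same chunk */
        assert(!chunk_in_flight(bitmap, 0));
        return 0;
    }
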
@@ -188,10 +228,13 @@ static void alloc_aio_bitmap(BlkMigDevState *bmds)
     bmds->aio_bitmap = g_malloc0(bitmap_size);
 }
 
+/* Never hold migration lock when yielding to the main loop!  */
+
 static void blk_mig_read_cb(void *opaque, int ret)
 {
     BlkMigBlock *blk = opaque;
 
+    blk_mig_lock();
     blk->ret = ret;
 
     QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
@@ -200,8 +243,11 @@ static void blk_mig_read_cb(void *opaque, int ret)
     block_mig_state.submitted--;
     block_mig_state.read_done++;
     assert(block_mig_state.submitted >= 0);
+    blk_mig_unlock();
 }
 
+/* Called with no lock taken.  */
+
 static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
 {
     int64_t total_sectors = bmds->total_sectors;
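
blk_mig_read_cb() is the producer half of a queue that flush_blks() later
drains: the AIO callback appends completed blocks to a mutex-protected
list, and the sender pops them, dropping the lock around the potentially
blocking send exactly as flush_blks() does around blk_send(). A
self-contained pthread sketch of the pattern, with hypothetical names
(not QEMU code; QEMU uses a FIFO QSIMPLEQ, the sketch uses a LIFO list
for brevity):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct done_block {
        struct done_block *next;
        int ret;                        /* completion status of the read */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct done_block *done_list;    /* protected by list_lock */

    /* Producer: runs in the AIO completion context. */
    static void read_done_cb(struct done_block *blk, int ret)
    {
        pthread_mutex_lock(&list_lock);
        blk->ret = ret;
        blk->next = done_list;
        done_list = blk;
        pthread_mutex_unlock(&list_lock);
    }

    /* Consumer: drains the list, unlocking around the blocking send. */
    static void drain_done_list(void)
    {
        struct done_block *blk;

        pthread_mutex_lock(&list_lock);
        while ((blk = done_list) != NULL) {
            done_list = blk->next;
            pthread_mutex_unlock(&list_lock);   /* sending may block */
            printf("sending block, ret=%d\n", blk->ret);
            free(blk);
            pthread_mutex_lock(&list_lock);     /* re-take for next pop */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct done_block *blk = calloc(1, sizeof(*blk));

        read_done_cb(blk, 0);
        drain_done_list();
        return 0;
    }
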
@@ -211,11 +257,13 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
     int nr_sectors;
 
     if (bmds->shared_base) {
+        qemu_mutex_lock_iothread();
         while (cur_sector < total_sectors &&
                !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                   &nr_sectors)) {
             cur_sector += nr_sectors;
         }
+        qemu_mutex_unlock_iothread();
     }
 
     if (cur_sector >= total_sectors) {
@@ -244,17 +292,23 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
     blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
 
+    blk_mig_lock();
     block_mig_state.submitted++;
+    blk_mig_unlock();
 
+    qemu_mutex_lock_iothread();
     blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                 nr_sectors, blk_mig_read_cb, blk);
 
     bdrv_reset_dirty(bs, cur_sector, nr_sectors);
-    bmds->cur_sector = cur_sector + nr_sectors;
+    qemu_mutex_unlock_iothread();
 
+    bmds->cur_sector = cur_sector + nr_sectors;
     return (bmds->cur_sector >= total_sectors);
 }
 
+/* Called with iothread lock taken.  */
+
 static void set_dirty_tracking(int enable)
 {
     BlkMigDevState *bmds;
@@ -282,8 +336,8 @@ static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
         bmds->completed_sectors = 0;
         bmds->shared_base = block_mig_state.shared_base;
         alloc_aio_bitmap(bmds);
-        drive_get_ref(drive_get_by_blockdev(bs));
         bdrv_set_in_use(bs, 1);
+        bdrv_ref(bs);
 
         block_mig_state.total_sector_sum += sectors;
 
@@ -306,10 +360,13 @@ static void init_blk_migration(QEMUFile *f)
     block_mig_state.total_sector_sum = 0;
     block_mig_state.prev_progress = -1;
     block_mig_state.bulk_completed = 0;
+    block_mig_state.zero_blocks = migrate_zero_blocks();
 
     bdrv_iterate(init_blk_migration_it, NULL);
 }
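
init_blk_migration() now latches migrate_zero_blocks(), i.e. whether the
zero-blocks migration capability was enabled (for example with
"migrate_set_capability zero-blocks on" at the HMP monitor). The per-chunk
test in blk_send() is semantically just a scan for a non-zero byte; a
naive standalone equivalent of QEMU's optimized buffer_is_zero():

    #include <stdbool.h>
    #include <stddef.h>

    static bool buffer_is_zero_naive(const void *buf, size_t len)
    {
        const unsigned char *p = buf;
        size_t i;

        for (i = 0; i < len; i++) {
            if (p[i] != 0) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        unsigned char chunk[512] = { 0 };

        return buffer_is_zero_naive(chunk, sizeof(chunk)) ? 0 : 1;
    }

QEMU's real implementation compares word- or vector-sized lanes at a
time, but returns the same answer.
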
 
+/* Called with no lock taken.  */
+
 static int blk_mig_save_bulked_block(QEMUFile *f)
 {
     int64_t completed_sector_sum = 0;
@@ -356,6 +413,8 @@ static void blk_mig_reset_dirty_cursor(void)
     }
 }
 
+/* Called with iothread lock taken.  */
+
 static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                  int is_async)
 {
@@ -366,8 +425,12 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
     int ret = -EIO;
 
     for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
+        blk_mig_lock();
         if (bmds_aio_inflight(bmds, sector)) {
+            blk_mig_unlock();
             bdrv_drain_all();
+        } else {
+            blk_mig_unlock();
         }
         if (bdrv_get_dirty(bmds->bs, sector)) {
 
@@ -389,8 +452,11 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
 
                 blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                             nr_sectors, blk_mig_read_cb, blk);
+
+                blk_mig_lock();
                 block_mig_state.submitted++;
                 bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
+                blk_mig_unlock();
             } else {
                 ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                 if (ret < 0) {
@@ -418,7 +484,9 @@ error:
     return ret;
 }
 
-/* return value:
+/* Called with iothread lock taken.
+ *
+ * return value:
  * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
@@ -437,6 +505,8 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
     return ret;
 }
 
+/* Called with no locks taken.  */
+
 static int flush_blks(QEMUFile *f)
 {
     BlkMigBlock *blk;
@@ -446,6 +516,7 @@ static int flush_blks(QEMUFile *f)
             __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
             block_mig_state.transferred);
 
+    blk_mig_lock();
     while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
         if (qemu_file_rate_limit(f)) {
             break;
@@ -456,7 +527,9 @@ static int flush_blks(QEMUFile *f)
         }
 
         QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
+        blk_mig_unlock();
         blk_send(f, blk);
+        blk_mig_lock();
 
         g_free(blk->buf);
         g_free(blk);
@@ -465,6 +538,7 @@ static int flush_blks(QEMUFile *f)
         block_mig_state.transferred++;
         assert(block_mig_state.read_done >= 0);
     }
+    blk_mig_unlock();
 
     DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
             block_mig_state.submitted, block_mig_state.read_done,
@@ -472,6 +546,8 @@ static int flush_blks(QEMUFile *f)
     return ret;
 }
 
+/* Called with iothread lock taken.  */
+
 static int64_t get_remaining_dirty(void)
 {
     BlkMigDevState *bmds;
@@ -484,6 +560,8 @@ static int64_t get_remaining_dirty(void)
     return dirty << BDRV_SECTOR_BITS;
 }
 
+/* Called with iothread lock taken.  */
+
 static void blk_mig_cleanup(void)
 {
     BlkMigDevState *bmds;
@@ -493,10 +571,11 @@ static void blk_mig_cleanup(void)
 
     set_dirty_tracking(0);
 
+    blk_mig_lock();
     while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
         QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
         bdrv_set_in_use(bmds->bs, 0);
-        drive_put_ref(drive_get_by_blockdev(bmds->bs));
+        bdrv_unref(bmds->bs);
         g_free(bmds->aio_bitmap);
         g_free(bmds);
     }
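
This hunk releases, via bdrv_unref(), the reference that
init_blk_migration_it() now takes with bdrv_ref(): migration pins each
block device for its whole lifetime so the device cannot be deleted while
blocks are still in flight. A toy standalone sketch of that refcounting
idiom (hypothetical names, not QEMU code):

    #include <stdlib.h>

    struct device {
        int refcnt;
    };

    static void dev_ref(struct device *d)
    {
        d->refcnt++;
    }

    static void dev_unref(struct device *d)
    {
        if (--d->refcnt == 0) {
            free(d);                /* last user frees the device */
        }
    }

    int main(void)
    {
        struct device *d = calloc(1, sizeof(*d));

        dev_ref(d);                 /* taken at setup time */
        dev_unref(d);               /* dropped at cleanup time */
        return 0;
    }
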
@@ -506,6 +585,7 @@ static void blk_mig_cleanup(void)
         g_free(blk->buf);
         g_free(blk);
     }
+    blk_mig_unlock();
 }
 
 static void block_migration_cancel(void *opaque)
@@ -520,10 +600,12 @@ static int block_save_setup(QEMUFile *f, void *opaque)
     DPRINTF("Enter save live setup submitted %d transferred %d\n",
             block_mig_state.submitted, block_mig_state.transferred);
 
+    qemu_mutex_lock_iothread();
     init_blk_migration(f);
 
     /* start tracking dirty blocks */
     set_dirty_tracking(1);
+    qemu_mutex_unlock_iothread();
 
     ret = flush_blks(f);
     blk_mig_reset_dirty_cursor();
@@ -548,9 +630,11 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
     blk_mig_reset_dirty_cursor();
 
     /* control the rate of transfer */
+    blk_mig_lock();
     while ((block_mig_state.submitted +
             block_mig_state.read_done) * BLOCK_SIZE <
            qemu_file_get_rate_limit(f)) {
+        blk_mig_unlock();
         if (block_mig_state.bulk_completed == 0) {
             /* first finish the bulk phase */
             if (blk_mig_save_bulked_block(f) == 0) {
@@ -559,16 +643,23 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
             }
             ret = 0;
         } else {
+            /* Always called with iothread lock taken for
+             * simplicity, block_save_complete also calls it.
+             */
+            qemu_mutex_lock_iothread();
             ret = blk_mig_save_dirty_block(f, 1);
+            qemu_mutex_unlock_iothread();
         }
         if (ret < 0) {
             return ret;
         }
+        blk_mig_lock();
         if (ret != 0) {
             /* no more dirty blocks */
             break;
         }
     }
+    blk_mig_unlock();
 
     ret = flush_blks(f);
     if (ret) {
@@ -579,6 +670,8 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
     return qemu_ftell(f) - last_ftell;
 }
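
The while condition at the top of this function throttles submissions:
bytes already in flight (submitted AIO reads plus blocks read but not yet
sent) are weighed against the bandwidth budget returned by
qemu_file_get_rate_limit(). A standalone sketch of the check (not QEMU
code):

    #include <stdbool.h>
    #include <stdint.h>

    #define BLOCK_SIZE (1 << 20)   /* 1 MiB chunks, as in block-migration.c */

    static bool may_submit_more(int submitted, int read_done, int64_t budget)
    {
        return (int64_t)(submitted + read_done) * BLOCK_SIZE < budget;
    }

    int main(void)
    {
        /* 31 MiB in flight against a 32 MiB budget: one more fits. */
        return may_submit_more(16, 15, 32 << 20) ? 0 : 1;
    }
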
 
+/* Called with iothread lock taken.  */
+
 static int block_save_complete(QEMUFile *f, void *opaque)
 {
     int ret;
@@ -595,7 +688,9 @@ static int block_save_complete(QEMUFile *f, void *opaque)
 
     /* we know for sure that save bulk is completed and
        all async reads have completed */
+    blk_mig_lock();
     assert(block_mig_state.submitted == 0);
+    blk_mig_unlock();
 
     do {
         ret = blk_mig_save_dirty_block(f, 0);
@@ -620,6 +715,8 @@ static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
     /* Estimate pending number of bytes to send */
     uint64_t pending;
 
+    qemu_mutex_lock_iothread();
+    blk_mig_lock();
     pending = get_remaining_dirty() +
                        block_mig_state.submitted * BLOCK_SIZE +
                        block_mig_state.read_done * BLOCK_SIZE;
@@ -628,6 +725,8 @@ static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
     if (pending == 0 && !block_mig_state.bulk_completed) {
         pending = BLOCK_SIZE;
     }
+    blk_mig_unlock();
+    qemu_mutex_unlock_iothread();
 
     DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
     return pending;
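
A standalone sketch of the estimate block_save_pending() computes: dirty
bytes still to re-send plus everything currently in flight, with one
chunk reported while the bulk phase is unfinished so the migration loop
keeps calling back instead of declaring the device done too early (not
QEMU code):

    #include <stdbool.h>
    #include <stdint.h>

    #define BLOCK_SIZE (1 << 20)

    static uint64_t pending_estimate(uint64_t remaining_dirty_bytes,
                                     int submitted, int read_done,
                                     bool bulk_completed)
    {
        uint64_t pending = remaining_dirty_bytes
                           + (uint64_t)submitted * BLOCK_SIZE
                           + (uint64_t)read_done * BLOCK_SIZE;

        if (pending == 0 && !bulk_completed) {
            pending = BLOCK_SIZE;   /* bulk phase still has work queued */
        }
        return pending;
    }

    int main(void)
    {
        return pending_estimate(0, 0, 0, false) == BLOCK_SIZE ? 0 : 1;
    }
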
@@ -680,12 +779,15 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                 nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
             }
 
-            buf = g_malloc(BLOCK_SIZE);
-
-            qemu_get_buffer(f, buf, BLOCK_SIZE);
-            ret = bdrv_write(bs, addr, buf, nr_sectors);
+            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+                ret = bdrv_write_zeroes(bs, addr, nr_sectors);
+            } else {
+                buf = g_malloc(BLOCK_SIZE);
+                qemu_get_buffer(f, buf, BLOCK_SIZE);
+                ret = bdrv_write(bs, addr, buf, nr_sectors);
+                g_free(buf);
+            }
 
-            g_free(buf);
             if (ret < 0) {
                 return ret;
             }
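
On the receive side, block_load() splits each 64-bit header word back
into flags and sector number before deciding between bdrv_write_zeroes()
and a full bdrv_write(). A standalone sketch of that decoding, the mirror
image of the sender-side packing shown earlier (not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    #define BDRV_SECTOR_BITS 9
    #define BDRV_SECTOR_SIZE (1 << BDRV_SECTOR_BITS)

    static void unpack_chunk_header(uint64_t word,
                                    int64_t *sector, uint64_t *flags)
    {
        *flags  = word & (BDRV_SECTOR_SIZE - 1);        /* low 9 bits */
        *sector = (int64_t)(word >> BDRV_SECTOR_BITS);
    }

    int main(void)
    {
        int64_t sector;
        uint64_t flags;

        unpack_chunk_header((2048ull << 9) | 0x08, &sector, &flags);
        assert(sector == 2048 && flags == 0x08);
        return 0;
    }
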
@@ -739,6 +841,7 @@ void blk_mig_init(void)
 {
     QSIMPLEQ_INIT(&block_mig_state.bmds_list);
     QSIMPLEQ_INIT(&block_mig_state.blk_list);
+    qemu_mutex_init(&block_mig_state.lock);
 
     register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                          &block_mig_state);