git.proxmox.com Git - mirror_qemu.git/commitdiff
Start migrating migration code into a migration directory
author    Dr. David Alan Gilbert <dgilbert@redhat.com>
          Fri, 12 Dec 2014 11:13:38 +0000 (11:13 +0000)
committer Amit Shah <amit.shah@redhat.com>
          Tue, 16 Dec 2014 12:17:36 +0000 (17:47 +0530)
The migration code now occupies a fair chunk of the top-level .c
files; it seems time to give it its own directory.

I've not touched:
   arch_init.c - that's mostly RAM migration but has a few random other
                 bits
   savevm.c    - because it's built target-specific

This is purely a code move; no code has changed.
   - it fails checkpatch because of old violations; it feels safer
     to keep this purely a move and fix those at some mythical future
     date.
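
(As a sanity check that this really is a pure move, git's rename detection
can confirm it; the command below is illustrative, with <commit> standing
in for this commit's hash:

    git show -M100% --summary <commit>

With 100% similarity required, each moved file should be reported as e.g.
"rename migration.c => migration/migration.c (100%)".)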

The xbzrle and vmstate tests are now only run for softmmu builds,
since they require files in the migration/ directory, which is only
built for softmmu.
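
(The tests/Makefile hunk is listed below but not shown in this excerpt; a
plausible shape for the gating, assuming the usual check-unit-y variables
and test names, would be:

    ifeq ($(CONFIG_SOFTMMU),y)
    check-unit-y += tests/test-xbzrle$(EXESUF)
    check-unit-y += tests/test-vmstate$(EXESUF)
    endif

The actual variable names in tests/Makefile may differ.)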

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
27 files changed:
Makefile.objs
block-migration.c [deleted file]
migration-exec.c [deleted file]
migration-fd.c [deleted file]
migration-rdma.c [deleted file]
migration-tcp.c [deleted file]
migration-unix.c [deleted file]
migration.c [deleted file]
migration/Makefile.objs [new file with mode: 0644]
migration/block-migration.c [new file with mode: 0644]
migration/migration-exec.c [new file with mode: 0644]
migration/migration-fd.c [new file with mode: 0644]
migration/migration-rdma.c [new file with mode: 0644]
migration/migration-tcp.c [new file with mode: 0644]
migration/migration-unix.c [new file with mode: 0644]
migration/migration.c [new file with mode: 0644]
migration/qemu-file-stdio.c [new file with mode: 0644]
migration/qemu-file-unix.c [new file with mode: 0644]
migration/qemu-file.c [new file with mode: 0644]
migration/vmstate.c [new file with mode: 0644]
migration/xbzrle.c [new file with mode: 0644]
qemu-file-stdio.c [deleted file]
qemu-file-unix.c [deleted file]
qemu-file.c [deleted file]
tests/Makefile
vmstate.c [deleted file]
xbzrle.c [deleted file]

diff --git a/Makefile.objs b/Makefile.objs
index 18fd35cf15617bf96dfbe518d74b86ae727dac12..abeb902b58523405df3c69b1bfd61e83009fe839 100644 (file)
@@ -48,15 +48,9 @@ common-obj-$(CONFIG_POSIX) += os-posix.o
 
 common-obj-$(CONFIG_LINUX) += fsdev/
 
-common-obj-y += migration.o migration-tcp.o
-common-obj-y += vmstate.o
-common-obj-y += qemu-file.o qemu-file-unix.o qemu-file-stdio.o
-common-obj-$(CONFIG_RDMA) += migration-rdma.o
+common-obj-y += migration/
 common-obj-y += qemu-char.o #aio.o
-common-obj-y += block-migration.o
-common-obj-y += page_cache.o xbzrle.o
-
-common-obj-$(CONFIG_POSIX) += migration-exec.o migration-unix.o migration-fd.o
+common-obj-y += page_cache.o
 
 common-obj-$(CONFIG_SPICE) += spice-qemu-char.o
 
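(The new migration/Makefile.objs created by this commit is not shown on this
page; presumably it carries over the deleted rules, now relative to the
migration/ directory, along these lines:

    common-obj-y += migration.o migration-tcp.o
    common-obj-y += vmstate.o
    common-obj-y += qemu-file.o qemu-file-unix.o qemu-file-stdio.o
    common-obj-$(CONFIG_RDMA) += migration-rdma.o
    common-obj-y += block-migration.o
    common-obj-y += xbzrle.o
    common-obj-$(CONFIG_POSIX) += migration-exec.o migration-unix.o migration-fd.o

Note that page_cache.o stays in the top-level Makefile.objs, per the hunk
above.)
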
diff --git a/block-migration.c b/block-migration.c
deleted file mode 100644 (file)
index 74d9eb1..0000000
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * QEMU live block migration
- *
- * Copyright IBM, Corp. 2009
- *
- * Authors:
- *  Liran Schour   <lirans@il.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu-common.h"
-#include "block/block.h"
-#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
-#include "hw/hw.h"
-#include "qemu/queue.h"
-#include "qemu/timer.h"
-#include "migration/block.h"
-#include "migration/migration.h"
-#include "sysemu/blockdev.h"
-#include <assert.h>
-
-#define BLOCK_SIZE                       (1 << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
-
-#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
-#define BLK_MIG_FLAG_EOS                0x02
-#define BLK_MIG_FLAG_PROGRESS           0x04
-#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
-
-#define MAX_IS_ALLOCATED_SEARCH 65536
-
-//#define DEBUG_BLK_MIGRATION
-
-#ifdef DEBUG_BLK_MIGRATION
-#define DPRINTF(fmt, ...) \
-    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-typedef struct BlkMigDevState {
-    /* Written during setup phase.  Can be read without a lock.  */
-    BlockDriverState *bs;
-    int shared_base;
-    int64_t total_sectors;
-    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
-
-    /* Only used by migration thread.  Does not need a lock.  */
-    int bulk_completed;
-    int64_t cur_sector;
-    int64_t cur_dirty;
-
-    /* Protected by block migration lock.  */
-    unsigned long *aio_bitmap;
-    int64_t completed_sectors;
-    BdrvDirtyBitmap *dirty_bitmap;
-    Error *blocker;
-} BlkMigDevState;
-
-typedef struct BlkMigBlock {
-    /* Only used by migration thread.  */
-    uint8_t *buf;
-    BlkMigDevState *bmds;
-    int64_t sector;
-    int nr_sectors;
-    struct iovec iov;
-    QEMUIOVector qiov;
-    BlockAIOCB *aiocb;
-
-    /* Protected by block migration lock.  */
-    int ret;
-    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
-} BlkMigBlock;
-
-typedef struct BlkMigState {
-    /* Written during setup phase.  Can be read without a lock.  */
-    int blk_enable;
-    int shared_base;
-    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
-    int64_t total_sector_sum;
-    bool zero_blocks;
-
-    /* Protected by lock.  */
-    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
-    int submitted;
-    int read_done;
-
-    /* Only used by migration thread.  Does not need a lock.  */
-    int transferred;
-    int prev_progress;
-    int bulk_completed;
-
-    /* Lock must be taken _inside_ the iothread lock.  */
-    QemuMutex lock;
-} BlkMigState;
-
-static BlkMigState block_mig_state;
-
-static void blk_mig_lock(void)
-{
-    qemu_mutex_lock(&block_mig_state.lock);
-}
-
-static void blk_mig_unlock(void)
-{
-    qemu_mutex_unlock(&block_mig_state.lock);
-}
-
-/* Must run outside of the iothread lock during the bulk phase,
- * or the VM will stall.
- */
-
-static void blk_send(QEMUFile *f, BlkMigBlock * blk)
-{
-    int len;
-    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
-
-    if (block_mig_state.zero_blocks &&
-        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
-        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
-    }
-
-    /* sector number and flags */
-    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
-                     | flags);
-
-    /* device name */
-    len = strlen(bdrv_get_device_name(blk->bmds->bs));
-    qemu_put_byte(f, len);
-    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);
-
-    /* If a block is zero we need to flush here, since the network
-     * bandwidth is now a lot higher than the storage device bandwidth;
-     * if we queue zero blocks we slow down the migration. */
-    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
-        qemu_fflush(f);
-        return;
-    }
-
-    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
-}
-
-int blk_mig_active(void)
-{
-    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
-}
-
-uint64_t blk_mig_bytes_transferred(void)
-{
-    BlkMigDevState *bmds;
-    uint64_t sum = 0;
-
-    blk_mig_lock();
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        sum += bmds->completed_sectors;
-    }
-    blk_mig_unlock();
-    return sum << BDRV_SECTOR_BITS;
-}
-
-uint64_t blk_mig_bytes_remaining(void)
-{
-    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
-}
-
-uint64_t blk_mig_bytes_total(void)
-{
-    BlkMigDevState *bmds;
-    uint64_t sum = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        sum += bmds->total_sectors;
-    }
-    return sum << BDRV_SECTOR_BITS;
-}
-
-
-/* Called with migration lock held.  */
-
-static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
-{
-    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    if (sector < bdrv_nb_sectors(bmds->bs)) {
-        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
-            (1UL << (chunk % (sizeof(unsigned long) * 8))));
-    } else {
-        return 0;
-    }
-}
-
-/* Called with migration lock held.  */
-
-static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
-                             int nb_sectors, int set)
-{
-    int64_t start, end;
-    unsigned long val, idx, bit;
-
-    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
-    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    for (; start <= end; start++) {
-        idx = start / (sizeof(unsigned long) * 8);
-        bit = start % (sizeof(unsigned long) * 8);
-        val = bmds->aio_bitmap[idx];
-        if (set) {
-            val |= 1UL << bit;
-        } else {
-            val &= ~(1UL << bit);
-        }
-        bmds->aio_bitmap[idx] = val;
-    }
-}
-
-static void alloc_aio_bitmap(BlkMigDevState *bmds)
-{
-    BlockDriverState *bs = bmds->bs;
-    int64_t bitmap_size;
-
-    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
-    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
-
-    bmds->aio_bitmap = g_malloc0(bitmap_size);
-}
-
-/* Never hold migration lock when yielding to the main loop!  */
-
-static void blk_mig_read_cb(void *opaque, int ret)
-{
-    BlkMigBlock *blk = opaque;
-
-    blk_mig_lock();
-    blk->ret = ret;
-
-    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
-    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);
-
-    block_mig_state.submitted--;
-    block_mig_state.read_done++;
-    assert(block_mig_state.submitted >= 0);
-    blk_mig_unlock();
-}
-
-/* Called with no lock taken.  */
-
-static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
-{
-    int64_t total_sectors = bmds->total_sectors;
-    int64_t cur_sector = bmds->cur_sector;
-    BlockDriverState *bs = bmds->bs;
-    BlkMigBlock *blk;
-    int nr_sectors;
-
-    if (bmds->shared_base) {
-        qemu_mutex_lock_iothread();
-        while (cur_sector < total_sectors &&
-               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
-                                  &nr_sectors)) {
-            cur_sector += nr_sectors;
-        }
-        qemu_mutex_unlock_iothread();
-    }
-
-    if (cur_sector >= total_sectors) {
-        bmds->cur_sector = bmds->completed_sectors = total_sectors;
-        return 1;
-    }
-
-    bmds->completed_sectors = cur_sector;
-
-    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
-
-    /* we are going to transfer a full block even if it is not allocated */
-    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-
-    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-        nr_sectors = total_sectors - cur_sector;
-    }
-
-    blk = g_new(BlkMigBlock, 1);
-    blk->buf = g_malloc(BLOCK_SIZE);
-    blk->bmds = bmds;
-    blk->sector = cur_sector;
-    blk->nr_sectors = nr_sectors;
-
-    blk->iov.iov_base = blk->buf;
-    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
-    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
-
-    blk_mig_lock();
-    block_mig_state.submitted++;
-    blk_mig_unlock();
-
-    qemu_mutex_lock_iothread();
-    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
-                                nr_sectors, blk_mig_read_cb, blk);
-
-    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
-    qemu_mutex_unlock_iothread();
-
-    bmds->cur_sector = cur_sector + nr_sectors;
-    return (bmds->cur_sector >= total_sectors);
-}
-
-/* Called with iothread lock taken.  */
-
-static int set_dirty_tracking(void)
-{
-    BlkMigDevState *bmds;
-    int ret;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
-                                                      NULL);
-        if (!bmds->dirty_bitmap) {
-            ret = -errno;
-            goto fail;
-        }
-    }
-    return 0;
-
-fail:
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->dirty_bitmap) {
-            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
-        }
-    }
-    return ret;
-}
-
-static void unset_dirty_tracking(void)
-{
-    BlkMigDevState *bmds;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
-    }
-}
-
-static void init_blk_migration(QEMUFile *f)
-{
-    BlockDriverState *bs;
-    BlkMigDevState *bmds;
-    int64_t sectors;
-
-    block_mig_state.submitted = 0;
-    block_mig_state.read_done = 0;
-    block_mig_state.transferred = 0;
-    block_mig_state.total_sector_sum = 0;
-    block_mig_state.prev_progress = -1;
-    block_mig_state.bulk_completed = 0;
-    block_mig_state.zero_blocks = migrate_zero_blocks();
-
-    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
-        if (bdrv_is_read_only(bs)) {
-            continue;
-        }
-
-        sectors = bdrv_nb_sectors(bs);
-        if (sectors <= 0) {
-            return;
-        }
-
-        bmds = g_new0(BlkMigDevState, 1);
-        bmds->bs = bs;
-        bmds->bulk_completed = 0;
-        bmds->total_sectors = sectors;
-        bmds->completed_sectors = 0;
-        bmds->shared_base = block_mig_state.shared_base;
-        alloc_aio_bitmap(bmds);
-        error_setg(&bmds->blocker, "block device is in use by migration");
-        bdrv_op_block_all(bs, bmds->blocker);
-        bdrv_ref(bs);
-
-        block_mig_state.total_sector_sum += sectors;
-
-        if (bmds->shared_base) {
-            DPRINTF("Start migration for %s with shared base image\n",
-                    bdrv_get_device_name(bs));
-        } else {
-            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
-        }
-
-        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
-    }
-}
-
-/* Called with no lock taken.  */
-
-static int blk_mig_save_bulked_block(QEMUFile *f)
-{
-    int64_t completed_sector_sum = 0;
-    BlkMigDevState *bmds;
-    int progress;
-    int ret = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->bulk_completed == 0) {
-            if (mig_save_device_bulk(f, bmds) == 1) {
-                /* completed bulk section for this device */
-                bmds->bulk_completed = 1;
-            }
-            completed_sector_sum += bmds->completed_sectors;
-            ret = 1;
-            break;
-        } else {
-            completed_sector_sum += bmds->completed_sectors;
-        }
-    }
-
-    if (block_mig_state.total_sector_sum != 0) {
-        progress = completed_sector_sum * 100 /
-                   block_mig_state.total_sector_sum;
-    } else {
-        progress = 100;
-    }
-    if (progress != block_mig_state.prev_progress) {
-        block_mig_state.prev_progress = progress;
-        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
-                         | BLK_MIG_FLAG_PROGRESS);
-        DPRINTF("Completed %d %%\r", progress);
-    }
-
-    return ret;
-}
-
-static void blk_mig_reset_dirty_cursor(void)
-{
-    BlkMigDevState *bmds;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        bmds->cur_dirty = 0;
-    }
-}
-
-/* Called with iothread lock taken.  */
-
-static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
-                                 int is_async)
-{
-    BlkMigBlock *blk;
-    int64_t total_sectors = bmds->total_sectors;
-    int64_t sector;
-    int nr_sectors;
-    int ret = -EIO;
-
-    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
-        blk_mig_lock();
-        if (bmds_aio_inflight(bmds, sector)) {
-            blk_mig_unlock();
-            bdrv_drain_all();
-        } else {
-            blk_mig_unlock();
-        }
-        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {
-
-            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-                nr_sectors = total_sectors - sector;
-            } else {
-                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-            }
-            blk = g_new(BlkMigBlock, 1);
-            blk->buf = g_malloc(BLOCK_SIZE);
-            blk->bmds = bmds;
-            blk->sector = sector;
-            blk->nr_sectors = nr_sectors;
-
-            if (is_async) {
-                blk->iov.iov_base = blk->buf;
-                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
-                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
-
-                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
-                                            nr_sectors, blk_mig_read_cb, blk);
-
-                blk_mig_lock();
-                block_mig_state.submitted++;
-                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
-                blk_mig_unlock();
-            } else {
-                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
-                if (ret < 0) {
-                    goto error;
-                }
-                blk_send(f, blk);
-
-                g_free(blk->buf);
-                g_free(blk);
-            }
-
-            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
-            break;
-        }
-        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
-        bmds->cur_dirty = sector;
-    }
-
-    return (bmds->cur_dirty >= bmds->total_sectors);
-
-error:
-    DPRINTF("Error reading sector %" PRId64 "\n", sector);
-    g_free(blk->buf);
-    g_free(blk);
-    return ret;
-}
-
-/* Called with iothread lock taken.
- *
- * return value:
- * 0: too much data for max_downtime
- * 1: little enough data for max_downtime
- */
-static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
-{
-    BlkMigDevState *bmds;
-    int ret = 1;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        ret = mig_save_device_dirty(f, bmds, is_async);
-        if (ret <= 0) {
-            break;
-        }
-    }
-
-    return ret;
-}
-
-/* Called with no locks taken.  */
-
-static int flush_blks(QEMUFile *f)
-{
-    BlkMigBlock *blk;
-    int ret = 0;
-
-    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
-            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
-            block_mig_state.transferred);
-
-    blk_mig_lock();
-    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
-        if (qemu_file_rate_limit(f)) {
-            break;
-        }
-        if (blk->ret < 0) {
-            ret = blk->ret;
-            break;
-        }
-
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
-        blk_mig_unlock();
-        blk_send(f, blk);
-        blk_mig_lock();
-
-        g_free(blk->buf);
-        g_free(blk);
-
-        block_mig_state.read_done--;
-        block_mig_state.transferred++;
-        assert(block_mig_state.read_done >= 0);
-    }
-    blk_mig_unlock();
-
-    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
-            block_mig_state.submitted, block_mig_state.read_done,
-            block_mig_state.transferred);
-    return ret;
-}
-
-/* Called with iothread lock taken.  */
-
-static int64_t get_remaining_dirty(void)
-{
-    BlkMigDevState *bmds;
-    int64_t dirty = 0;
-
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
-    }
-
-    return dirty << BDRV_SECTOR_BITS;
-}
-
-/* Called with iothread lock taken.  */
-
-static void blk_mig_cleanup(void)
-{
-    BlkMigDevState *bmds;
-    BlkMigBlock *blk;
-
-    bdrv_drain_all();
-
-    unset_dirty_tracking();
-
-    blk_mig_lock();
-    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
-        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
-        error_free(bmds->blocker);
-        bdrv_unref(bmds->bs);
-        g_free(bmds->aio_bitmap);
-        g_free(bmds);
-    }
-
-    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
-        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
-        g_free(blk->buf);
-        g_free(blk);
-    }
-    blk_mig_unlock();
-}
-
-static void block_migration_cancel(void *opaque)
-{
-    blk_mig_cleanup();
-}
-
-static int block_save_setup(QEMUFile *f, void *opaque)
-{
-    int ret;
-
-    DPRINTF("Enter save live setup submitted %d transferred %d\n",
-            block_mig_state.submitted, block_mig_state.transferred);
-
-    qemu_mutex_lock_iothread();
-    init_blk_migration(f);
-
-    /* start tracking dirty blocks */
-    ret = set_dirty_tracking();
-
-    if (ret) {
-        qemu_mutex_unlock_iothread();
-        return ret;
-    }
-
-    qemu_mutex_unlock_iothread();
-
-    ret = flush_blks(f);
-    blk_mig_reset_dirty_cursor();
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-
-    return ret;
-}
-
-static int block_save_iterate(QEMUFile *f, void *opaque)
-{
-    int ret;
-    int64_t last_ftell = qemu_ftell(f);
-    int64_t delta_ftell;
-
-    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
-            block_mig_state.submitted, block_mig_state.transferred);
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    blk_mig_reset_dirty_cursor();
-
-    /* control the rate of transfer */
-    blk_mig_lock();
-    while ((block_mig_state.submitted +
-            block_mig_state.read_done) * BLOCK_SIZE <
-           qemu_file_get_rate_limit(f)) {
-        blk_mig_unlock();
-        if (block_mig_state.bulk_completed == 0) {
-            /* first finish the bulk phase */
-            if (blk_mig_save_bulked_block(f) == 0) {
-                /* finished saving bulk on all devices */
-                block_mig_state.bulk_completed = 1;
-            }
-            ret = 0;
-        } else {
-            /* Always called with iothread lock taken for
-             * simplicity; block_save_complete also calls it.
-             */
-            qemu_mutex_lock_iothread();
-            ret = blk_mig_save_dirty_block(f, 1);
-            qemu_mutex_unlock_iothread();
-        }
-        if (ret < 0) {
-            return ret;
-        }
-        blk_mig_lock();
-        if (ret != 0) {
-            /* no more dirty blocks */
-            break;
-        }
-    }
-    blk_mig_unlock();
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-    delta_ftell = qemu_ftell(f) - last_ftell;
-    if (delta_ftell > 0) {
-        return 1;
-    } else if (delta_ftell < 0) {
-        return -1;
-    } else {
-        return 0;
-    }
-}
-
-/* Called with iothread lock taken.  */
-
-static int block_save_complete(QEMUFile *f, void *opaque)
-{
-    int ret;
-
-    DPRINTF("Enter save live complete submitted %d transferred %d\n",
-            block_mig_state.submitted, block_mig_state.transferred);
-
-    ret = flush_blks(f);
-    if (ret) {
-        return ret;
-    }
-
-    blk_mig_reset_dirty_cursor();
-
-    /* we know for sure that the bulk save has completed and
-       all async reads have completed */
-    blk_mig_lock();
-    assert(block_mig_state.submitted == 0);
-    blk_mig_unlock();
-
-    do {
-        ret = blk_mig_save_dirty_block(f, 0);
-        if (ret < 0) {
-            return ret;
-        }
-    } while (ret == 0);
-
-    /* report completion */
-    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
-
-    DPRINTF("Block migration completed\n");
-
-    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
-
-    blk_mig_cleanup();
-    return 0;
-}
-
-static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
-{
-    /* Estimate pending number of bytes to send */
-    uint64_t pending;
-
-    qemu_mutex_lock_iothread();
-    blk_mig_lock();
-    pending = get_remaining_dirty() +
-                       block_mig_state.submitted * BLOCK_SIZE +
-                       block_mig_state.read_done * BLOCK_SIZE;
-
-    /* Report at least one block pending during bulk phase */
-    if (pending == 0 && !block_mig_state.bulk_completed) {
-        pending = BLOCK_SIZE;
-    }
-    blk_mig_unlock();
-    qemu_mutex_unlock_iothread();
-
-    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
-    return pending;
-}
-
-static int block_load(QEMUFile *f, void *opaque, int version_id)
-{
-    static int banner_printed;
-    int len, flags;
-    char device_name[256];
-    int64_t addr;
-    BlockDriverState *bs, *bs_prev = NULL;
-    uint8_t *buf;
-    int64_t total_sectors = 0;
-    int nr_sectors;
-    int ret;
-
-    do {
-        addr = qemu_get_be64(f);
-
-        flags = addr & ~BDRV_SECTOR_MASK;
-        addr >>= BDRV_SECTOR_BITS;
-
-        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
-            /* get device name */
-            len = qemu_get_byte(f);
-            qemu_get_buffer(f, (uint8_t *)device_name, len);
-            device_name[len] = '\0';
-
-            bs = bdrv_find(device_name);
-            if (!bs) {
-                fprintf(stderr, "Error unknown block device %s\n",
-                        device_name);
-                return -EINVAL;
-            }
-
-            if (bs != bs_prev) {
-                bs_prev = bs;
-                total_sectors = bdrv_nb_sectors(bs);
-                if (total_sectors <= 0) {
-                    error_report("Error getting length of block device %s",
-                                 device_name);
-                    return -EINVAL;
-                }
-            }
-
-            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
-                nr_sectors = total_sectors - addr;
-            } else {
-                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
-            }
-
-            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
-                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
-                                        BDRV_REQ_MAY_UNMAP);
-            } else {
-                buf = g_malloc(BLOCK_SIZE);
-                qemu_get_buffer(f, buf, BLOCK_SIZE);
-                ret = bdrv_write(bs, addr, buf, nr_sectors);
-                g_free(buf);
-            }
-
-            if (ret < 0) {
-                return ret;
-            }
-        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
-            if (!banner_printed) {
-                printf("Receiving block device images\n");
-                banner_printed = 1;
-            }
-            printf("Completed %d %%%c", (int)addr,
-                   (addr == 100) ? '\n' : '\r');
-            fflush(stdout);
-        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
-            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
-            return -EINVAL;
-        }
-        ret = qemu_file_get_error(f);
-        if (ret != 0) {
-            return ret;
-        }
-    } while (!(flags & BLK_MIG_FLAG_EOS));
-
-    return 0;
-}
-
-static void block_set_params(const MigrationParams *params, void *opaque)
-{
-    block_mig_state.blk_enable = params->blk;
-    block_mig_state.shared_base = params->shared;
-
-    /* shared base means that blk_enable = 1 */
-    block_mig_state.blk_enable |= params->shared;
-}
-
-static bool block_is_active(void *opaque)
-{
-    return block_mig_state.blk_enable == 1;
-}
-
-static SaveVMHandlers savevm_block_handlers = {
-    .set_params = block_set_params,
-    .save_live_setup = block_save_setup,
-    .save_live_iterate = block_save_iterate,
-    .save_live_complete = block_save_complete,
-    .save_live_pending = block_save_pending,
-    .load_state = block_load,
-    .cancel = block_migration_cancel,
-    .is_active = block_is_active,
-};
-
-void blk_mig_init(void)
-{
-    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
-    QSIMPLEQ_INIT(&block_mig_state.blk_list);
-    qemu_mutex_init(&block_mig_state.lock);
-
-    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
-                         &block_mig_state);
-}
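
(For reference, each record that block_load() above consumes begins with a
big-endian 64-bit word packing the sector number and the BLK_MIG_FLAG_*
bits. A minimal decoding sketch in C, using the BDRV_SECTOR_BITS value (9)
from QEMU's block layer:

    #include <stdint.h>

    #define BDRV_SECTOR_BITS 9

    static void decode_blk_header(uint64_t wire, int64_t *sector, int *flags)
    {
        *flags  = wire & ((1 << BDRV_SECTOR_BITS) - 1); /* low bits: flags */
        *sector = (int64_t)(wire >> BDRV_SECTOR_BITS);  /* rest: sector    */
    }

A DEVICE_BLOCK record is followed by a one-byte device-name length, the name
itself, and, unless ZERO_BLOCK is set, BLOCK_SIZE bytes of payload; a
PROGRESS record reuses the sector field as a percentage.)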
diff --git a/migration-exec.c b/migration-exec.c
deleted file mode 100644 (file)
index 4790247..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * QEMU live migration
- *
- * Copyright IBM, Corp. 2008
- * Copyright Dell MessageOne 2008
- *
- * Authors:
- *  Anthony Liguori   <aliguori@us.ibm.com>
- *  Charles Duffy     <charles_duffy@messageone.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu-common.h"
-#include "qemu/sockets.h"
-#include "qemu/main-loop.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "block/block.h"
-#include <sys/types.h>
-#include <sys/wait.h>
-
-//#define DEBUG_MIGRATION_EXEC
-
-#ifdef DEBUG_MIGRATION_EXEC
-#define DPRINTF(fmt, ...) \
-    do { printf("migration-exec: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp)
-{
-    s->file = qemu_popen_cmd(command, "w");
-    if (s->file == NULL) {
-        error_setg_errno(errp, errno, "failed to popen the migration target");
-        return;
-    }
-
-    migrate_fd_connect(s);
-}
-
-static void exec_accept_incoming_migration(void *opaque)
-{
-    QEMUFile *f = opaque;
-
-    qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL);
-    process_incoming_migration(f);
-}
-
-void exec_start_incoming_migration(const char *command, Error **errp)
-{
-    QEMUFile *f;
-
-    DPRINTF("Attempting to start an incoming migration\n");
-    f = qemu_popen_cmd(command, "r");
-    if(f == NULL) {
-        error_setg_errno(errp, errno, "failed to popen the migration source");
-        return;
-    }
-
-    qemu_set_fd_handler2(qemu_get_fd(f), NULL,
-                        exec_accept_incoming_migration, NULL, f);
-}
diff --git a/migration-fd.c b/migration-fd.c
deleted file mode 100644 (file)
index d2e523a..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * QEMU live migration via generic fd
- *
- * Copyright Red Hat, Inc. 2009
- *
- * Authors:
- *  Chris Lalancette <clalance@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu-common.h"
-#include "qemu/main-loop.h"
-#include "qemu/sockets.h"
-#include "migration/migration.h"
-#include "monitor/monitor.h"
-#include "migration/qemu-file.h"
-#include "block/block.h"
-
-//#define DEBUG_MIGRATION_FD
-
-#ifdef DEBUG_MIGRATION_FD
-#define DPRINTF(fmt, ...) \
-    do { printf("migration-fd: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp)
-{
-    int fd = monitor_get_fd(cur_mon, fdname, errp);
-    if (fd == -1) {
-        return;
-    }
-    s->file = qemu_fdopen(fd, "wb");
-
-    migrate_fd_connect(s);
-}
-
-static void fd_accept_incoming_migration(void *opaque)
-{
-    QEMUFile *f = opaque;
-
-    qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL);
-    process_incoming_migration(f);
-}
-
-void fd_start_incoming_migration(const char *infd, Error **errp)
-{
-    int fd;
-    QEMUFile *f;
-
-    DPRINTF("Attempting to start an incoming migration via fd\n");
-
-    fd = strtol(infd, NULL, 0);
-    f = qemu_fdopen(fd, "rb");
-    if(f == NULL) {
-        error_setg_errno(errp, errno, "failed to open the source descriptor");
-        return;
-    }
-
-    qemu_set_fd_handler2(fd, NULL, fd_accept_incoming_migration, NULL, f);
-}
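
(For context, the fd transport expects a descriptor that already exists in
the QEMU process: on the destination, -incoming fd:<n> hands over descriptor
n, parsed by the strtol() above; on the source, a descriptor is first passed
to the monitor with getfd and then referenced by name, e.g.

    getfd migfd
    migrate fd:migfd

where migfd is an arbitrarily chosen name.)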
diff --git a/migration-rdma.c b/migration-rdma.c
deleted file mode 100644 (file)
index b32dbdf..0000000
+++ /dev/null
@@ -1,3438 +0,0 @@
-/*
- * RDMA protocol and interfaces
- *
- * Copyright IBM, Corp. 2010-2013
- *
- * Authors:
- *  Michael R. Hines <mrhines@us.ibm.com>
- *  Jiuxing Liu <jl@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later.  See the COPYING file in the top-level directory.
- *
- */
-#include "qemu-common.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "exec/cpu-common.h"
-#include "qemu/main-loop.h"
-#include "qemu/sockets.h"
-#include "qemu/bitmap.h"
-#include "block/coroutine.h"
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netdb.h>
-#include <arpa/inet.h>
-#include <string.h>
-#include <rdma/rdma_cma.h>
-
-//#define DEBUG_RDMA
-//#define DEBUG_RDMA_VERBOSE
-//#define DEBUG_RDMA_REALLY_VERBOSE
-
-#ifdef DEBUG_RDMA
-#define DPRINTF(fmt, ...) \
-    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-#ifdef DEBUG_RDMA_VERBOSE
-#define DDPRINTF(fmt, ...) \
-    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DDPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-#ifdef DEBUG_RDMA_REALLY_VERBOSE
-#define DDDPRINTF(fmt, ...) \
-    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DDDPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-/*
- * Print an error on both the Monitor and the Log file.
- */
-#define ERROR(errp, fmt, ...) \
-    do { \
-        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
-        if (errp && (*(errp) == NULL)) { \
-            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
-        } \
-    } while (0)
-
-#define RDMA_RESOLVE_TIMEOUT_MS 10000
-
-/* Do not merge data if larger than this. */
-#define RDMA_MERGE_MAX (2 * 1024 * 1024)
-#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)
-
-#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */
-
-/*
- * This is only for non-live state being migrated.
- * Instead of RDMA_WRITE messages, we use RDMA_SEND
- * messages for that state, which requires a different
- * delivery design than main memory.
- */
-#define RDMA_SEND_INCREMENT 32768
-
-/*
- * Maximum size of an infiniband SEND message
- */
-#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
-#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096
-
-#define RDMA_CONTROL_VERSION_CURRENT 1
-/*
- * Capabilities for negotiation.
- */
-#define RDMA_CAPABILITY_PIN_ALL 0x01
-
-/*
- * Add the other flags above to this list of known capabilities
- * as they are introduced.
- */
-static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
-
-#define CHECK_ERROR_STATE() \
-    do { \
-        if (rdma->error_state) { \
-            if (!rdma->error_reported) { \
-                fprintf(stderr, "RDMA is in an error state waiting migration" \
-                                " to abort!\n"); \
-                rdma->error_reported = 1; \
-            } \
-            return rdma->error_state; \
-        } \
-    } while (0);
-
-/*
- * A work request ID is 64-bits and we split up these bits
- * into 3 parts:
- *
- * bits 0-15 : type of control message, 2^16
- * bits 16-29: ram block index, 2^14
- * bits 30-63: ram block chunk number, 2^34
- *
- * The last two bit ranges are only used for RDMA writes,
- * in order to track their completion and potentially
- * also track unregistration status of the message.
- */
-#define RDMA_WRID_TYPE_SHIFT  0UL
-#define RDMA_WRID_BLOCK_SHIFT 16UL
-#define RDMA_WRID_CHUNK_SHIFT 30UL
-
-#define RDMA_WRID_TYPE_MASK \
-    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)
-
-#define RDMA_WRID_BLOCK_MASK \
-    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))
-
-#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
-
-/*
- * RDMA migration protocol:
- * 1. RDMA Writes (data messages, i.e. RAM)
- * 2. IB Send/Recv (control channel messages)
- */
-enum {
-    RDMA_WRID_NONE = 0,
-    RDMA_WRID_RDMA_WRITE = 1,
-    RDMA_WRID_SEND_CONTROL = 2000,
-    RDMA_WRID_RECV_CONTROL = 4000,
-};
-
-const char *wrid_desc[] = {
-    [RDMA_WRID_NONE] = "NONE",
-    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
-    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
-    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
-};
-
-/*
- * Work request IDs for IB SEND messages only (not RDMA writes).
- * This is used by the migration protocol to transmit
- * control messages (such as device state and registration commands)
- *
- * We could use more WRs, but we have enough for now.
- */
-enum {
-    RDMA_WRID_READY = 0,
-    RDMA_WRID_DATA,
-    RDMA_WRID_CONTROL,
-    RDMA_WRID_MAX,
-};
-
-/*
- * SEND/RECV IB Control Messages.
- */
-enum {
-    RDMA_CONTROL_NONE = 0,
-    RDMA_CONTROL_ERROR,
-    RDMA_CONTROL_READY,               /* ready to receive */
-    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
-    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
-    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
-    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
-    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
-    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
-    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
-    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
-    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
-};
-
-const char *control_desc[] = {
-    [RDMA_CONTROL_NONE] = "NONE",
-    [RDMA_CONTROL_ERROR] = "ERROR",
-    [RDMA_CONTROL_READY] = "READY",
-    [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
-    [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
-    [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
-    [RDMA_CONTROL_COMPRESS] = "COMPRESS",
-    [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
-    [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
-    [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
-    [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
-    [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
-};
-
-/*
- * Memory and MR structures used to represent an IB Send/Recv work request.
- * This is *not* used for RDMA writes, only IB Send/Recv.
- */
-typedef struct {
-    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
-    struct   ibv_mr *control_mr;               /* registration metadata */
-    size_t   control_len;                      /* length of the message */
-    uint8_t *control_curr;                     /* start of unconsumed bytes */
-} RDMAWorkRequestData;
-
-/*
- * Negotiate RDMA capabilities during connection-setup time.
- */
-typedef struct {
-    uint32_t version;
-    uint32_t flags;
-} RDMACapabilities;
-
-static void caps_to_network(RDMACapabilities *cap)
-{
-    cap->version = htonl(cap->version);
-    cap->flags = htonl(cap->flags);
-}
-
-static void network_to_caps(RDMACapabilities *cap)
-{
-    cap->version = ntohl(cap->version);
-    cap->flags = ntohl(cap->flags);
-}
-
-/*
- * Representation of a RAMBlock from an RDMA perspective.
- * This is not transmitted, only local.
- * This and subsequent structures cannot be linked lists
- * because we're using a single IB message to transmit
- * the information. It's small anyway, so a list is overkill.
- */
-typedef struct RDMALocalBlock {
-    uint8_t  *local_host_addr; /* local virtual address */
-    uint64_t remote_host_addr; /* remote virtual address */
-    uint64_t offset;
-    uint64_t length;
-    struct   ibv_mr **pmr;     /* MRs for chunk-level registration */
-    struct   ibv_mr *mr;       /* MR for non-chunk-level registration */
-    uint32_t *remote_keys;     /* rkeys for chunk-level registration */
-    uint32_t remote_rkey;      /* rkeys for non-chunk-level registration */
-    int      index;            /* which block are we */
-    bool     is_ram_block;
-    int      nb_chunks;
-    unsigned long *transit_bitmap;
-    unsigned long *unregister_bitmap;
-} RDMALocalBlock;
-
-/*
- * Also represents a RAMblock, but only on the dest.
- * This gets transmitted by the dest during connection-time
- * to the source VM and then is used to populate the
- * corresponding RDMALocalBlock with
- * the information needed to perform the actual RDMA.
- */
-typedef struct QEMU_PACKED RDMARemoteBlock {
-    uint64_t remote_host_addr;
-    uint64_t offset;
-    uint64_t length;
-    uint32_t remote_rkey;
-    uint32_t padding;
-} RDMARemoteBlock;
-
-static uint64_t htonll(uint64_t v)
-{
-    union { uint32_t lv[2]; uint64_t llv; } u;
-    u.lv[0] = htonl(v >> 32);
-    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
-    return u.llv;
-}
-
-static uint64_t ntohll(uint64_t v) {
-    union { uint32_t lv[2]; uint64_t llv; } u;
-    u.llv = v;
-    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
-}
-
-static void remote_block_to_network(RDMARemoteBlock *rb)
-{
-    rb->remote_host_addr = htonll(rb->remote_host_addr);
-    rb->offset = htonll(rb->offset);
-    rb->length = htonll(rb->length);
-    rb->remote_rkey = htonl(rb->remote_rkey);
-}
-
-static void network_to_remote_block(RDMARemoteBlock *rb)
-{
-    rb->remote_host_addr = ntohll(rb->remote_host_addr);
-    rb->offset = ntohll(rb->offset);
-    rb->length = ntohll(rb->length);
-    rb->remote_rkey = ntohl(rb->remote_rkey);
-}
-
-/*
- * Virtual address of the above structures used for transmitting
- * the RAMBlock descriptions at connection-time.
- * This structure is *not* transmitted.
- */
-typedef struct RDMALocalBlocks {
-    int nb_blocks;
-    bool     init;             /* main memory init complete */
-    RDMALocalBlock *block;
-} RDMALocalBlocks;
-
-/*
- * Main data structure for RDMA state.
- * While there is only one copy of this structure being allocated right now,
- * this is the place where one would start if one wanted to consider
- * having more than one RDMA connection open at the same time.
- */
-typedef struct RDMAContext {
-    char *host;
-    int port;
-
-    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];
-
-    /*
-     * This is used by *_exchange_send() to figure out whether
-     * the initial "READY" message has already been received.
-     * This is because other functions may potentially poll() and detect
-     * the READY message before send() does, in which case we need to
-     * know if it completed.
-     */
-    int control_ready_expected;
-
-    /* number of outstanding writes */
-    int nb_sent;
-
-    /* store info about the current buffer so that we can
-       merge it with future sends */
-    uint64_t current_addr;
-    uint64_t current_length;
-    /* index of ram block the current buffer belongs to */
-    int current_index;
-    /* index of the chunk in the current ram block */
-    int current_chunk;
-
-    bool pin_all;
-
-    /*
-     * infiniband-specific variables for opening the device
-     * and maintaining connection state and so forth.
-     *
-     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
-     * cm_id->verbs, cm_id->channel, and cm_id->qp.
-     */
-    struct rdma_cm_id *cm_id;               /* connection manager ID */
-    struct rdma_cm_id *listen_id;
-    bool connected;
-
-    struct ibv_context          *verbs;
-    struct rdma_event_channel   *channel;
-    struct ibv_qp *qp;                      /* queue pair */
-    struct ibv_comp_channel *comp_channel;  /* completion channel */
-    struct ibv_pd *pd;                      /* protection domain */
-    struct ibv_cq *cq;                      /* completion queue */
-
-    /*
-     * If a previous write failed (perhaps because of a failed
-     * memory registration), then do not attempt any future work
-     * and remember the error state.
-     */
-    int error_state;
-    int error_reported;
-
-    /*
-     * Description of ram blocks used throughout the code.
-     */
-    RDMALocalBlocks local_ram_blocks;
-    RDMARemoteBlock *block;
-
-    /*
-     * Migration on *destination* started.
-     * Then use coroutine yield function.
-     * Source runs in a thread, so we don't care.
-     */
-    int migration_started_on_destination;
-
-    int total_registrations;
-    int total_writes;
-
-    int unregister_current, unregister_next;
-    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];
-
-    GHashTable *blockmap;
-} RDMAContext;
-
-/*
- * Interface to the rest of the migration call stack.
- */
-typedef struct QEMUFileRDMA {
-    RDMAContext *rdma;
-    size_t len;
-    void *file;
-} QEMUFileRDMA;
-
-/*
- * Main structure for IB Send/Recv control messages.
- * This gets prepended at the beginning of every Send/Recv.
- */
-typedef struct QEMU_PACKED {
-    uint32_t len;     /* Total length of data portion */
-    uint32_t type;    /* which control command to perform */
-    uint32_t repeat;  /* number of commands in data portion of same type */
-    uint32_t padding;
-} RDMAControlHeader;
-
-static void control_to_network(RDMAControlHeader *control)
-{
-    control->type = htonl(control->type);
-    control->len = htonl(control->len);
-    control->repeat = htonl(control->repeat);
-}
-
-static void network_to_control(RDMAControlHeader *control)
-{
-    control->type = ntohl(control->type);
-    control->len = ntohl(control->len);
-    control->repeat = ntohl(control->repeat);
-}
-
-/*
- * Register a single Chunk.
- * Information sent by the source VM to inform the dest
- * to register a single chunk of memory before we can perform
- * the actual RDMA operation.
- */
-typedef struct QEMU_PACKED {
-    union QEMU_PACKED {
-        uint64_t current_addr;  /* offset into the ramblock of the chunk */
-        uint64_t chunk;         /* chunk to lookup if unregistering */
-    } key;
-    uint32_t current_index; /* which ramblock the chunk belongs to */
-    uint32_t padding;
-    uint64_t chunks;            /* how many sequential chunks to register */
-} RDMARegister;
-
-static void register_to_network(RDMARegister *reg)
-{
-    reg->key.current_addr = htonll(reg->key.current_addr);
-    reg->current_index = htonl(reg->current_index);
-    reg->chunks = htonll(reg->chunks);
-}
-
-static void network_to_register(RDMARegister *reg)
-{
-    reg->key.current_addr = ntohll(reg->key.current_addr);
-    reg->current_index = ntohl(reg->current_index);
-    reg->chunks = ntohll(reg->chunks);
-}
-
-typedef struct QEMU_PACKED {
-    uint32_t value;     /* if zero, we will madvise() */
-    uint32_t block_idx; /* which ram block index */
-    uint64_t offset;    /* offset of this chunk in the remote ramblock */
-    uint64_t length;    /* length of the chunk */
-} RDMACompress;
-
-static void compress_to_network(RDMACompress *comp)
-{
-    comp->value = htonl(comp->value);
-    comp->block_idx = htonl(comp->block_idx);
-    comp->offset = htonll(comp->offset);
-    comp->length = htonll(comp->length);
-}
-
-static void network_to_compress(RDMACompress *comp)
-{
-    comp->value = ntohl(comp->value);
-    comp->block_idx = ntohl(comp->block_idx);
-    comp->offset = ntohll(comp->offset);
-    comp->length = ntohll(comp->length);
-}
-
-/*
- * The result of the dest's memory registration produces an "rkey"
- * which the source VM must reference in order to perform
- * the RDMA operation.
- */
-typedef struct QEMU_PACKED {
-    uint32_t rkey;
-    uint32_t padding;
-    uint64_t host_addr;
-} RDMARegisterResult;
-
-static void result_to_network(RDMARegisterResult *result)
-{
-    result->rkey = htonl(result->rkey);
-    result->host_addr = htonll(result->host_addr);
-};
-
-static void network_to_result(RDMARegisterResult *result)
-{
-    result->rkey = ntohl(result->rkey);
-    result->host_addr = ntohll(result->host_addr);
-};
-
-const char *print_wrid(int wrid);
-static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
-                                   uint8_t *data, RDMAControlHeader *resp,
-                                   int *resp_idx,
-                                   int (*callback)(RDMAContext *rdma));
-
-static inline uint64_t ram_chunk_index(const uint8_t *start,
-                                       const uint8_t *host)
-{
-    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
-}
-
-static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
-                                       uint64_t i)
-{
-    return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
-                                    + (i << RDMA_REG_CHUNK_SHIFT));
-}
-
-static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
-                                     uint64_t i)
-{
-    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
-                                         (1UL << RDMA_REG_CHUNK_SHIFT);
-
-    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
-        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
-    }
-
-    return result;
-}
-
-static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr,
-                         ram_addr_t block_offset, uint64_t length)
-{
-    RDMALocalBlocks *local = &rdma->local_ram_blocks;
-    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
-        (void *) block_offset);
-    RDMALocalBlock *old = local->block;
-
-    assert(block == NULL);
-
-    local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));
-
-    if (local->nb_blocks) {
-        int x;
-
-        for (x = 0; x < local->nb_blocks; x++) {
-            g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
-            g_hash_table_insert(rdma->blockmap, (void *)old[x].offset,
-                                                &local->block[x]);
-        }
-        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
-        g_free(old);
-    }
-
-    block = &local->block[local->nb_blocks];
-
-    block->local_host_addr = host_addr;
-    block->offset = block_offset;
-    block->length = length;
-    block->index = local->nb_blocks;
-    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
-    block->transit_bitmap = bitmap_new(block->nb_chunks);
-    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
-    block->unregister_bitmap = bitmap_new(block->nb_chunks);
-    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
-    block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));
-
-    block->is_ram_block = local->init ? false : true;
-
-    g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);
-
-    DDPRINTF("Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
-           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
-            local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
-            block->length, (uint64_t) (block->local_host_addr + block->length),
-                BITS_TO_LONGS(block->nb_chunks) *
-                    sizeof(unsigned long) * 8, block->nb_chunks);
-
-    local->nb_blocks++;
-
-    return 0;
-}
-
-/*
- * Memory regions need to be registered with the device and queue pairs set up
- * in advance before the migration starts. This tells us where the RAM blocks
- * are so that we can register them individually.
- */
-static void qemu_rdma_init_one_block(void *host_addr,
-    ram_addr_t block_offset, ram_addr_t length, void *opaque)
-{
-    __qemu_rdma_add_block(opaque, host_addr, block_offset, length);
-}
-
-/*
- * Identify the RAMBlocks and their quantity. They will be used to
- * identify chunk boundaries inside each RAMBlock and also be referenced
- * during dynamic page registration.
- */
-static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
-{
-    RDMALocalBlocks *local = &rdma->local_ram_blocks;
-
-    assert(rdma->blockmap == NULL);
-    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
-    memset(local, 0, sizeof *local);
-    qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
-    DPRINTF("Allocated %d local ram block structures\n", local->nb_blocks);
-    rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
-                        rdma->local_ram_blocks.nb_blocks);
-    local->init = true;
-    return 0;
-}
-
-static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
-{
-    RDMALocalBlocks *local = &rdma->local_ram_blocks;
-    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
-        (void *) block_offset);
-    RDMALocalBlock *old = local->block;
-    int x;
-
-    assert(block);
-
-    if (block->pmr) {
-        int j;
-
-        for (j = 0; j < block->nb_chunks; j++) {
-            if (!block->pmr[j]) {
-                continue;
-            }
-            ibv_dereg_mr(block->pmr[j]);
-            rdma->total_registrations--;
-        }
-        g_free(block->pmr);
-        block->pmr = NULL;
-    }
-
-    if (block->mr) {
-        ibv_dereg_mr(block->mr);
-        rdma->total_registrations--;
-        block->mr = NULL;
-    }
-
-    g_free(block->transit_bitmap);
-    block->transit_bitmap = NULL;
-
-    g_free(block->unregister_bitmap);
-    block->unregister_bitmap = NULL;
-
-    g_free(block->remote_keys);
-    block->remote_keys = NULL;
-
-    for (x = 0; x < local->nb_blocks; x++) {
-        g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
-    }
-
-    if (local->nb_blocks > 1) {
-
-        local->block = g_malloc0(sizeof(RDMALocalBlock) *
-                                    (local->nb_blocks - 1));
-
-        if (block->index) {
-            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
-        }
-
-        if (block->index < (local->nb_blocks - 1)) {
-            memcpy(local->block + block->index, old + (block->index + 1),
-                sizeof(RDMALocalBlock) *
-                    (local->nb_blocks - (block->index + 1)));
-        }
-    } else {
-        assert(block == local->block);
-        local->block = NULL;
-    }
-
-    DDPRINTF("Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
-           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
-            local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
-            block->length, (uint64_t) (block->local_host_addr + block->length),
-                BITS_TO_LONGS(block->nb_chunks) *
-                    sizeof(unsigned long) * 8, block->nb_chunks);
-
-    g_free(old);
-
-    local->nb_blocks--;
-
-    if (local->nb_blocks) {
-        for (x = 0; x < local->nb_blocks; x++) {
-            g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset,
-                                                &local->block[x]);
-        }
-    }
-
-    return 0;
-}
-
-/*
- * Put in the log file which RDMA device was opened and the details
- * associated with that device.
- */
-static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
-{
-    struct ibv_port_attr port;
-
-    if (ibv_query_port(verbs, 1, &port)) {
-        fprintf(stderr, "FAILED TO QUERY PORT INFORMATION!\n");
-        return;
-    }
-
-    printf("%s RDMA Device opened: kernel name %s "
-           "uverbs device name %s, "
-           "infiniband_verbs class device path %s, "
-           "infiniband class device path %s, "
-           "transport: (%d) %s\n",
-                who,
-                verbs->device->name,
-                verbs->device->dev_name,
-                verbs->device->dev_path,
-                verbs->device->ibdev_path,
-                port.link_layer,
-                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
-                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET) 
-                    ? "Ethernet" : "Unknown"));
-}
-
-/*
- * Put in the log file the RDMA gid addressing information,
- * useful for folks who have trouble understanding the
- * RDMA device hierarchy in the kernel.
- */
-static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
-{
-    char sgid[33];
-    char dgid[33];
-    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
-    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
-    DPRINTF("%s Source GID: %s, Dest GID: %s\n", who, sgid, dgid);
-}
-
-/*
- * As of now, IPv6 over RoCE / iWARP is not supported by linux.
- * We will try the next addrinfo struct, and fail if there are
- * no other valid addresses to bind against.
- *
- * If the user is listening on '[::]', then we will not have opened a device
- * yet and have no way of verifying if the device is RoCE or not.
- *
- * In this case, the source VM will throw an error for ALL types of
- * connections (both IPv4 and IPv6) if the destination machine does not have
- * a regular infiniband network available for use.
- *
- * The only way to guarantee that an error is thrown for broken kernels is
- * for the management software to choose a *specific* interface at bind time
- * and validate what type of hardware it is.
- *
- * Unfortunately, this puts the user in a fix:
- * 
- *  If the source VM connects with an IPv4 address without knowing that the
- *  destination has bound to '[::]' the migration will unconditionally fail
- *  unless the management software is explicitly listening on the the IPv4
- *  address while using a RoCE-based device.
- *
- *  If the source VM connects with an IPv6 address, then we're OK because we can
- *  throw an error on the source (and similarly on the destination).
- * 
- *  But in mixed environments, this will be broken for a while until it is fixed
- *  inside linux.
- *
- * We do provide a *tiny* bit of help in this function: We can list all of the
- * devices in the system and check to see if all the devices are RoCE or
- * Infiniband. 
- *
- * If we detect that we have a *pure* RoCE environment, then we can safely
- * thrown an error even if the management software has specified '[::]' as the
- * bind address.
- *
- * However, if there is are multiple hetergeneous devices, then we cannot make
- * this assumption and the user just has to be sure they know what they are
- * doing.
- *
- * Patches are being reviewed on linux-rdma.
- */
-static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
-{
-    struct ibv_port_attr port_attr;
-
-    /* This bug only exists in Linux, to our knowledge. */
-#ifdef CONFIG_LINUX
-
-    /*
-     * Verbs are only NULL if management has bound to '[::]'.
-     *
-     * Let's iterate through all the devices and see if there are any pure IB
-     * devices (non-ethernet).
-     *
-     * If not, then we can safely proceed with the migration.
-     * Otherwise, there are no guarantees until the bug is fixed in Linux.
-     */
-    if (!verbs) {
-        int num_devices, x;
-        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
-        bool roce_found = false;
-        bool ib_found = false;
-
-        for (x = 0; x < num_devices; x++) {
-            verbs = ibv_open_device(dev_list[x]);
-
-            if (ibv_query_port(verbs, 1, &port_attr)) {
-                ibv_close_device(verbs);
-                ERROR(errp, "Could not query initial IB port");
-                return -EINVAL;
-            }
-
-            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
-                ib_found = true;
-            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
-                roce_found = true;
-            }
-
-            ibv_close_device(verbs);
-
-        }
-
-        if (roce_found) {
-            if (ib_found) {
-                fprintf(stderr, "WARN: migrations may fail:"
-                                " IPv6 over RoCE / iWARP in Linux"
-                                " is broken. But since you appear to have a"
-                                " mixed RoCE / IB environment, be sure to only"
-                                " migrate over the IB fabric until the kernel"
-                                " fixes the bug.\n");
-            } else {
-                ERROR(errp, "You only have RoCE / iWARP devices in your system"
-                            " and your management software has specified '[::]'"
-                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
-                return -ENONET;
-            }
-        }
-
-        return 0;
-    }
-
-    /*
-     * If we have a verbs context, that means that something other than '[::]'
-     * was used by the management software for binding. In that case we can
-     * actually warn the user about a potentially broken kernel.
-     */
-
-    /* IB ports start with 1, not 0 */
-    if (ibv_query_port(verbs, 1, &port_attr)) {
-        ERROR(errp, "Could not query initial IB port");
-        return -EINVAL;
-    }
-
-    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
-        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
-                    "(but patches on linux-rdma in progress)");
-        return -ENONET;
-    }
-
-#endif
-
-    return 0;
-}
-
-/*
- * Figure out which RDMA device corresponds to the requested IP hostname.
- * Also create the initial connection manager identifiers for opening
- * the connection.
- */
-static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
-{
-    int ret;
-    struct rdma_addrinfo *res;
-    char port_str[16];
-    struct rdma_cm_event *cm_event;
-    char ip[40] = "unknown";
-    struct rdma_addrinfo *e;
-
-    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
-        ERROR(errp, "RDMA hostname has not been set");
-        return -EINVAL;
-    }
-
-    /* create CM channel */
-    rdma->channel = rdma_create_event_channel();
-    if (!rdma->channel) {
-        ERROR(errp, "could not create CM channel");
-        return -EINVAL;
-    }
-
-    /* create CM id */
-    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
-    if (ret) {
-        ERROR(errp, "could not create channel id");
-        goto err_resolve_create_id;
-    }
-
-    snprintf(port_str, 16, "%d", rdma->port);
-    port_str[15] = '\0';
-
-    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
-    if (ret < 0) {
-        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
-        goto err_resolve_get_addr;
-    }
-
-    for (e = res; e != NULL; e = e->ai_next) {
-        inet_ntop(e->ai_family,
-            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
-        DPRINTF("Trying %s => %s\n", rdma->host, ip);
-
-        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
-                RDMA_RESOLVE_TIMEOUT_MS);
-        if (!ret) {
-            if (e->ai_family == AF_INET6) {
-                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
-                if (ret) {
-                    continue;
-                }
-            }
-            goto route;
-        }
-    }
-
-    ERROR(errp, "could not resolve address %s", rdma->host);
-    goto err_resolve_get_addr;
-
-route:
-    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);
-
-    ret = rdma_get_cm_event(rdma->channel, &cm_event);
-    if (ret) {
-        ERROR(errp, "could not perform event_addr_resolved");
-        goto err_resolve_get_addr;
-    }
-
-    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
-        ERROR(errp, "result not equal to event_addr_resolved %s",
-                rdma_event_str(cm_event->event));
-        perror("rdma_resolve_addr");
-        rdma_ack_cm_event(cm_event);
-        ret = -EINVAL;
-        goto err_resolve_get_addr;
-    }
-    rdma_ack_cm_event(cm_event);
-
-    /* resolve route */
-    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
-    if (ret) {
-        ERROR(errp, "could not resolve rdma route");
-        goto err_resolve_get_addr;
-    }
-
-    ret = rdma_get_cm_event(rdma->channel, &cm_event);
-    if (ret) {
-        ERROR(errp, "could not perform event_route_resolved");
-        goto err_resolve_get_addr;
-    }
-    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
-        ERROR(errp, "result not equal to event_route_resolved: %s",
-                        rdma_event_str(cm_event->event));
-        rdma_ack_cm_event(cm_event);
-        ret = -EINVAL;
-        goto err_resolve_get_addr;
-    }
-    rdma_ack_cm_event(cm_event);
-    rdma->verbs = rdma->cm_id->verbs;
-    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
-    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
-    return 0;
-
-err_resolve_get_addr:
-    rdma_destroy_id(rdma->cm_id);
-    rdma->cm_id = NULL;
-err_resolve_create_id:
-    rdma_destroy_event_channel(rdma->channel);
-    rdma->channel = NULL;
-    return ret;
-}
-
-/*
- * Create protection domain and completion queues
- */
-static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
-{
-    /* allocate pd */
-    rdma->pd = ibv_alloc_pd(rdma->verbs);
-    if (!rdma->pd) {
-        fprintf(stderr, "failed to allocate protection domain\n");
-        return -1;
-    }
-
-    /* create completion channel */
-    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
-    if (!rdma->comp_channel) {
-        fprintf(stderr, "failed to allocate completion channel\n");
-        goto err_alloc_pd_cq;
-    }
-
-    /*
-     * Completion queue can be filled by both read and write work requests,
-     * so must reflect the sum of both possible queue sizes.
-     */
-    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
-            NULL, rdma->comp_channel, 0);
-    if (!rdma->cq) {
-        fprintf(stderr, "failed to allocate completion queue\n");
-        goto err_alloc_pd_cq;
-    }
-
-    return 0;
-
-err_alloc_pd_cq:
-    if (rdma->pd) {
-        ibv_dealloc_pd(rdma->pd);
-    }
-    if (rdma->comp_channel) {
-        ibv_destroy_comp_channel(rdma->comp_channel);
-    }
-    rdma->pd = NULL;
-    rdma->comp_channel = NULL;
-    return -1;
-}
-
-/*
- * Create queue pairs.
- */
-static int qemu_rdma_alloc_qp(RDMAContext *rdma)
-{
-    struct ibv_qp_init_attr attr = { 0 };
-    int ret;
-
-    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
-    attr.cap.max_recv_wr = 3;
-    attr.cap.max_send_sge = 1;
-    attr.cap.max_recv_sge = 1;
-    attr.send_cq = rdma->cq;
-    attr.recv_cq = rdma->cq;
-    attr.qp_type = IBV_QPT_RC;
-
-    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
-    if (ret) {
-        return -1;
-    }
-
-    rdma->qp = rdma->cm_id->qp;
-    return 0;
-}
-
-static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
-{
-    int i;
-    RDMALocalBlocks *local = &rdma->local_ram_blocks;
-
-    for (i = 0; i < local->nb_blocks; i++) {
-        local->block[i].mr =
-            ibv_reg_mr(rdma->pd,
-                    local->block[i].local_host_addr,
-                    local->block[i].length,
-                    IBV_ACCESS_LOCAL_WRITE |
-                    IBV_ACCESS_REMOTE_WRITE
-                    );
-        if (!local->block[i].mr) {
-            perror("Failed to register local dest ram block!\n");
-            break;
-        }
-        rdma->total_registrations++;
-    }
-
-    if (i >= local->nb_blocks) {
-        return 0;
-    }
-
-    for (i--; i >= 0; i--) {
-        ibv_dereg_mr(local->block[i].mr);
-        rdma->total_registrations--;
-    }
-
-    return -1;
-}
-
-/*
- * Find the ram block that corresponds to the page requested to be
- * transmitted by QEMU.
- *
- * Once the block is found, also identify which 'chunk' within that
- * block that the page belongs to.
- *
- * This search cannot fail or the migration will fail.
- */
-static int qemu_rdma_search_ram_block(RDMAContext *rdma,
-                                      uint64_t block_offset,
-                                      uint64_t offset,
-                                      uint64_t length,
-                                      uint64_t *block_index,
-                                      uint64_t *chunk_index)
-{
-    uint64_t current_addr = block_offset + offset;
-    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
-                                                (void *) block_offset);
-    assert(block);
-    assert(current_addr >= block->offset);
-    assert((current_addr + length) <= (block->offset + block->length));
-
-    *block_index = block->index;
-    *chunk_index = ram_chunk_index(block->local_host_addr,
-                block->local_host_addr + (current_addr - block->offset));
-
-    return 0;
-}
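-
-/*
- * Illustrative sketch only (not part of the moved code): assuming
- * ram_chunk_index() divides a host-address delta by the chunk size
- * (1UL << RDMA_REG_CHUNK_SHIFT), as the chunk arithmetic in
- * qemu_rdma_write_one() suggests, a lookup on a large-enough block
- * behaves roughly like:
- *
- *   uint64_t block_index, chunk_index;
- *   qemu_rdma_search_ram_block(rdma, block->offset,
- *                              3 * (1UL << RDMA_REG_CHUNK_SHIFT) + 42, 1,
- *                              &block_index, &chunk_index);
- *   // block_index == block->index, chunk_index == 3
- */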
-
-/*
- * Register a chunk with IB. If the chunk was already registered
- * previously, then skip.
- *
- * Also return the keys associated with the registration needed
- * to perform the actual RDMA operation.
- */
-static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
-        RDMALocalBlock *block, uint8_t *host_addr,
-        uint32_t *lkey, uint32_t *rkey, int chunk,
-        uint8_t *chunk_start, uint8_t *chunk_end)
-{
-    if (block->mr) {
-        if (lkey) {
-            *lkey = block->mr->lkey;
-        }
-        if (rkey) {
-            *rkey = block->mr->rkey;
-        }
-        return 0;
-    }
-
-    /* allocate memory to store chunk MRs */
-    if (!block->pmr) {
-        block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
-        if (!block->pmr) {
-            return -1;
-        }
-    }
-
-    /*
-     * If 'rkey', then we're the destination, so grant access to the source.
-     *
-     * If 'lkey', then we're the source VM, so grant access only to ourselves.
-     */
-    if (!block->pmr[chunk]) {
-        uint64_t len = chunk_end - chunk_start;
-
-        DDPRINTF("Registering %" PRIu64 " bytes @ %p\n",
-                 len, chunk_start);
-
-        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
-                chunk_start, len,
-                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
-                        IBV_ACCESS_REMOTE_WRITE) : 0));
-
-        if (!block->pmr[chunk]) {
-            perror("Failed to register chunk!");
-            fprintf(stderr, "Chunk details: block: %d chunk index %d"
-                            " start %" PRIu64 " end %" PRIu64 " host %" PRIu64
-                            " local %" PRIu64 " registrations: %d\n",
-                            block->index, chunk, (uint64_t) chunk_start,
-                            (uint64_t) chunk_end, (uint64_t) host_addr,
-                            (uint64_t) block->local_host_addr,
-                            rdma->total_registrations);
-            return -1;
-        }
-        rdma->total_registrations++;
-    }
-
-    if (lkey) {
-        *lkey = block->pmr[chunk]->lkey;
-    }
-    if (rkey) {
-        *rkey = block->pmr[chunk]->rkey;
-    }
-    return 0;
-}
-
-/*
- * Register (at connection time) the memory used for control
- * channel messages.
- */
-static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
-{
-    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
-            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
-            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
-    if (rdma->wr_data[idx].control_mr) {
-        rdma->total_registrations++;
-        return 0;
-    }
-    fprintf(stderr, "qemu_rdma_reg_control failed!\n");
-    return -1;
-}
-
-const char *print_wrid(int wrid)
-{
-    if (wrid >= RDMA_WRID_RECV_CONTROL) {
-        return wrid_desc[RDMA_WRID_RECV_CONTROL];
-    }
-    return wrid_desc[wrid];
-}
-
-/*
- * RDMA requires memory registration (mlock/pinning), but this is not good for
- * overcommitment.
- *
- * In preparation for the future where LRU information or workload-specific
- * writable working set memory access behavior is available to QEMU,
- * it would be nice to have in place the ability to UN-register/UN-pin
- * particular memory regions from the RDMA hardware when it is determined that
- * those regions of memory will likely not be accessed again in the near future.
- *
- * While we do not yet have such information right now, the following
- * compile-time option allows us to perform a non-optimized version of this
- * behavior.
- *
- * By uncommenting this option, you will cause *all* RDMA transfers to be
- * unregistered immediately after the transfer completes on both sides of the
- * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
- *
- * This will have a terrible impact on migration performance, so until future
- * workload information or LRU information is available, do not attempt to use
- * this feature except for basic testing.
- */
-//#define RDMA_UNREGISTRATION_EXAMPLE
-
-/*
- * Perform a non-optimized memory unregistration after every transfer
- * for demonstration purposes, only if pin-all is not requested.
- *
- * Potential optimizations:
- * 1. Start a new thread to run this function continuously
- *      - for bit clearing
- *      - and for receipt of unregister messages
- * 2. Use an LRU.
- * 3. Use workload hints.
- */
-static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
-{
-    while (rdma->unregistrations[rdma->unregister_current]) {
-        int ret;
-        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
-        uint64_t chunk =
-            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
-        uint64_t index =
-            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
-        RDMALocalBlock *block =
-            &(rdma->local_ram_blocks.block[index]);
-        RDMARegister reg = { .current_index = index };
-        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
-                                 };
-        RDMAControlHeader head = { .len = sizeof(RDMARegister),
-                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
-                                   .repeat = 1,
-                                 };
-
-        DDPRINTF("Processing unregister for chunk: %" PRIu64
-                 " at position %d\n", chunk, rdma->unregister_current);
-
-        rdma->unregistrations[rdma->unregister_current] = 0;
-        rdma->unregister_current++;
-
-        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
-            rdma->unregister_current = 0;
-        }
-
-        /*
-         * Unregistration is speculative (because migration is single-threaded
-         * and we cannot break the protocol's infiniband message ordering).
-         * Thus, if the memory is currently being used for transmission,
-         * then abort the attempt to unregister and try again
-         * later the next time a completion is received for this memory.
-         */
-        clear_bit(chunk, block->unregister_bitmap);
-
-        if (test_bit(chunk, block->transit_bitmap)) {
-            DDPRINTF("Cannot unregister inflight chunk: %" PRIu64 "\n", chunk);
-            continue;
-        }
-
-        DDPRINTF("Sending unregister for chunk: %" PRIu64 "\n", chunk);
-
-        ret = ibv_dereg_mr(block->pmr[chunk]);
-        block->pmr[chunk] = NULL;
-        block->remote_keys[chunk] = 0;
-
-        if (ret != 0) {
-            perror("unregistration chunk failed");
-            return -ret;
-        }
-        rdma->total_registrations--;
-
-        reg.key.chunk = chunk;
-        register_to_network(&reg);
-        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
-                                &resp, NULL, NULL);
-        if (ret < 0) {
-            return ret;
-        }
-
-        DDPRINTF("Unregister for chunk: %" PRIu64 " complete.\n", chunk);
-    }
-
-    return 0;
-}
-
-static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
-                                         uint64_t chunk)
-{
-    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;
-
-    result |= (index << RDMA_WRID_BLOCK_SHIFT);
-    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);
-
-    return result;
-}
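-
-/*
- * A hedged round-trip sketch (not in the original file): a wrid built by
- * qemu_rdma_make_wrid() can be unpacked with the same masks and shifts that
- * qemu_rdma_poll() and qemu_rdma_unregister_waiting() use:
- *
- *   uint64_t wrid  = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, 2, 5);
- *   uint64_t type  = wrid & RDMA_WRID_TYPE_MASK;   // RDMA_WRID_RDMA_WRITE
- *   uint64_t index = (wrid & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
- *   uint64_t chunk = (wrid & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
- *   // index == 2, chunk == 5, provided the values fit in their bit fields
- */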
-
-/*
- * Set bit for unregistration in the next iteration.
- * We cannot transmit right here, but will unpin later.
- */
-static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
-                                        uint64_t chunk, uint64_t wr_id)
-{
-    if (rdma->unregistrations[rdma->unregister_next] != 0) {
-        fprintf(stderr, "rdma migration: queue is full!\n");
-    } else {
-        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
-
-        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
-            DDPRINTF("Appending unregister chunk %" PRIu64
-                    " at position %d\n", chunk, rdma->unregister_next);
-
-            rdma->unregistrations[rdma->unregister_next++] =
-                    qemu_rdma_make_wrid(wr_id, index, chunk);
-
-            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
-                rdma->unregister_next = 0;
-            }
-        } else {
-            DDPRINTF("Unregister chunk %" PRIu64 " already in queue.\n",
-                    chunk);
-        }
-    }
-}
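-
-/*
- * Reader's note (added commentary, not original code): unregistrations[]
- * behaves as a fixed-size ring of RDMA_SIGNALED_SEND_MAX slots.
- * qemu_rdma_signal_unregister() is the producer, appending at
- * unregister_next; qemu_rdma_unregister_waiting() is the consumer, draining
- * from unregister_current. Both indices wrap to 0 at RDMA_SIGNALED_SEND_MAX,
- * and a zero slot marks the ring as empty at that position.
- */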
-
-/*
- * Poll the completion queue to see if a work request
- * (of any kind) has completed.
- * Return the work request ID that completed.
- */
-static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
-                               uint32_t *byte_len)
-{
-    int ret;
-    struct ibv_wc wc;
-    uint64_t wr_id;
-
-    ret = ibv_poll_cq(rdma->cq, 1, &wc);
-
-    if (!ret) {
-        *wr_id_out = RDMA_WRID_NONE;
-        return 0;
-    }
-
-    if (ret < 0) {
-        fprintf(stderr, "ibv_poll_cq return %d!\n", ret);
-        return ret;
-    }
-
-    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;
-
-    if (wc.status != IBV_WC_SUCCESS) {
-        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
-                        wc.status, ibv_wc_status_str(wc.status));
-        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);
-
-        return -1;
-    }
-
-    if (rdma->control_ready_expected &&
-        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
-        DDDPRINTF("completion %s #%" PRId64 " received (%" PRId64 ")"
-                  " left %d\n", wrid_desc[RDMA_WRID_RECV_CONTROL],
-                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
-        rdma->control_ready_expected = 0;
-    }
-
-    if (wr_id == RDMA_WRID_RDMA_WRITE) {
-        uint64_t chunk =
-            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
-        uint64_t index =
-            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
-        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
-
-        DDDPRINTF("completions %s (%" PRId64 ") left %d, "
-                 "block %" PRIu64 ", chunk: %" PRIu64 " %p %p\n",
-                 print_wrid(wr_id), wr_id, rdma->nb_sent, index, chunk,
-                 block->local_host_addr, (void *)block->remote_host_addr);
-
-        clear_bit(chunk, block->transit_bitmap);
-
-        if (rdma->nb_sent > 0) {
-            rdma->nb_sent--;
-        }
-
-        if (!rdma->pin_all) {
-            /*
-             * FYI: If one wanted to signal a specific chunk to be unregistered
-             * using LRU or workload-specific information, this is the function
-             * you would call to do so. That chunk would then get asynchronously
-             * unregistered later.
-             */
-#ifdef RDMA_UNREGISTRATION_EXAMPLE
-            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
-#endif
-        }
-    } else {
-        DDDPRINTF("other completion %s (%" PRId64 ") received left %d\n",
-            print_wrid(wr_id), wr_id, rdma->nb_sent);
-    }
-
-    *wr_id_out = wc.wr_id;
-    if (byte_len) {
-        *byte_len = wc.byte_len;
-    }
-
-    return 0;
-}
-
-/*
- * Block until the next work request has completed.
- *
- * First poll to see if a work request has already completed,
- * otherwise block.
- *
- * If we encounter completed work requests for IDs other than
- * the one we're interested in, then that's generally an error.
- *
- * The only exception is actual RDMA Write completions. These
- * completions only need to be recorded, but do not actually
- * need further processing.
- */
-static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
-                                    uint32_t *byte_len)
-{
-    int num_cq_events = 0, ret = 0;
-    struct ibv_cq *cq;
-    void *cq_ctx;
-    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;
-
-    if (ibv_req_notify_cq(rdma->cq, 0)) {
-        return -1;
-    }
-    /* poll cq first */
-    while (wr_id != wrid_requested) {
-        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
-        if (ret < 0) {
-            return ret;
-        }
-
-        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
-
-        if (wr_id == RDMA_WRID_NONE) {
-            break;
-        }
-        if (wr_id != wrid_requested) {
-            DDDPRINTF("A Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
-                print_wrid(wrid_requested),
-                wrid_requested, print_wrid(wr_id), wr_id);
-        }
-    }
-
-    if (wr_id == wrid_requested) {
-        return 0;
-    }
-
-    while (1) {
-        /*
-         * Coroutine doesn't start until process_incoming_migration()
-         * so don't yield unless we know we're running inside of a coroutine.
-         */
-        if (rdma->migration_started_on_destination) {
-            yield_until_fd_readable(rdma->comp_channel->fd);
-        }
-
-        if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) {
-            perror("ibv_get_cq_event");
-            goto err_block_for_wrid;
-        }
-
-        num_cq_events++;
-
-        if (ibv_req_notify_cq(cq, 0)) {
-            goto err_block_for_wrid;
-        }
-
-        while (wr_id != wrid_requested) {
-            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
-            if (ret < 0) {
-                goto err_block_for_wrid;
-            }
-
-            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
-
-            if (wr_id == RDMA_WRID_NONE) {
-                break;
-            }
-            if (wr_id != wrid_requested) {
-                DDDPRINTF("B Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
-                    print_wrid(wrid_requested), wrid_requested,
-                    print_wrid(wr_id), wr_id);
-            }
-        }
-
-        if (wr_id == wrid_requested) {
-            goto success_block_for_wrid;
-        }
-    }
-
-success_block_for_wrid:
-    if (num_cq_events) {
-        ibv_ack_cq_events(cq, num_cq_events);
-    }
-    return 0;
-
-err_block_for_wrid:
-    if (num_cq_events) {
-        ibv_ack_cq_events(cq, num_cq_events);
-    }
-    return ret;
-}
-
-/*
- * Post a SEND message work request for the control channel
- * containing some data and block until the post completes.
- */
-static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
-                                       RDMAControlHeader *head)
-{
-    int ret = 0;
-    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
-    struct ibv_send_wr *bad_wr;
-    struct ibv_sge sge = {
-                           .addr = (uint64_t)(wr->control),
-                           .length = head->len + sizeof(RDMAControlHeader),
-                           .lkey = wr->control_mr->lkey,
-                         };
-    struct ibv_send_wr send_wr = {
-                                   .wr_id = RDMA_WRID_SEND_CONTROL,
-                                   .opcode = IBV_WR_SEND,
-                                   .send_flags = IBV_SEND_SIGNALED,
-                                   .sg_list = &sge,
-                                   .num_sge = 1,
-                                };
-
-    DDDPRINTF("CONTROL: sending %s..\n", control_desc[head->type]);
-
-    /*
-     * We don't actually need to do a memcpy() in here if we used
-     * the "sge" properly, but since we're only sending control messages
-     * (not RAM in a performance-critical path), it's OK for now.
-     *
-     * The copy makes the RDMAControlHeader simpler to manipulate
-     * for the time being.
-     */
-    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
-    memcpy(wr->control, head, sizeof(RDMAControlHeader));
-    control_to_network((void *) wr->control);
-
-    if (buf) {
-        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
-    }
-
-    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
-
-    if (ret > 0) {
-        fprintf(stderr, "Failed to use post IB SEND for control!\n");
-        return -ret;
-    }
-
-    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
-    if (ret < 0) {
-        fprintf(stderr, "rdma migration: send polling control error!\n");
-    }
-
-    return ret;
-}
-
-/*
- * Post a RECV work request in anticipation of some future receipt
- * of data on the control channel.
- */
-static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
-{
-    struct ibv_recv_wr *bad_wr;
-    struct ibv_sge sge = {
-                            .addr = (uint64_t)(rdma->wr_data[idx].control),
-                            .length = RDMA_CONTROL_MAX_BUFFER,
-                            .lkey = rdma->wr_data[idx].control_mr->lkey,
-                         };
-
-    struct ibv_recv_wr recv_wr = {
-                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
-                                    .sg_list = &sge,
-                                    .num_sge = 1,
-                                 };
-
-    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
-        return -1;
-    }
-
-    return 0;
-}
-
-/*
- * Block and wait for a RECV control channel message to arrive.
- */
-static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
-                RDMAControlHeader *head, int expecting, int idx)
-{
-    uint32_t byte_len;
-    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
-                                       &byte_len);
-
-    if (ret < 0) {
-        fprintf(stderr, "rdma migration: recv polling control error!\n");
-        return ret;
-    }
-
-    network_to_control((void *) rdma->wr_data[idx].control);
-    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
-
-    DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]);
-
-    if (expecting == RDMA_CONTROL_NONE) {
-        DDDPRINTF("Surprise: got %s (%d)\n",
-                  control_desc[head->type], head->type);
-    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
-        fprintf(stderr, "Was expecting a %s (%d) control message"
-                ", but got: %s (%d), length: %d\n",
-                control_desc[expecting], expecting,
-                control_desc[head->type], head->type, head->len);
-        return -EIO;
-    }
-    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
-        fprintf(stderr, "too long length: %d\n", head->len);
-        return -EINVAL;
-    }
-    if (sizeof(*head) + head->len != byte_len) {
-        fprintf(stderr, "Malformed length: %d byte_len %d\n",
-                head->len, byte_len);
-        return -EINVAL;
-    }
-
-    return 0;
-}
-
-/*
- * When a RECV work request has completed, the work request's
- * buffer is pointed at the header.
- *
- * This will advance the pointer to the data portion
- * of the control message in the work request's buffer,
- * which was populated after the work request finished.
- */
-static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
-                                  RDMAControlHeader *head)
-{
-    rdma->wr_data[idx].control_len = head->len;
-    rdma->wr_data[idx].control_curr =
-        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
-}
-
-/*
- * This is an 'atomic' high-level operation to deliver a single, unified
- * control-channel message.
- *
- * Additionally, if the user is expecting some kind of reply to this message,
- * they can request a 'resp' response message be filled in by posting an
- * additional work request on behalf of the user and waiting for an additional
- * completion.
- *
- * The extra (optional) response is used during registration to save us from
- * having to perform an *additional* exchange of messages just to provide a
- * response by instead piggy-backing on the acknowledgement.
- */
-static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
-                                   uint8_t *data, RDMAControlHeader *resp,
-                                   int *resp_idx,
-                                   int (*callback)(RDMAContext *rdma))
-{
-    int ret = 0;
-
-    /*
-     * Wait until the dest is ready before attempting to deliver the message
-     * by waiting for a READY message.
-     */
-    if (rdma->control_ready_expected) {
-        RDMAControlHeader resp;
-        ret = qemu_rdma_exchange_get_response(rdma,
-                                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
-        if (ret < 0) {
-            return ret;
-        }
-    }
-
-    /*
-     * If the user is expecting a response, post a WR in anticipation of it.
-     */
-    if (resp) {
-        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
-        if (ret) {
-            fprintf(stderr, "rdma migration: error posting"
-                    " extra control recv for anticipated result!");
-            return ret;
-        }
-    }
-
-    /*
-     * Post a WR to replace the one we just consumed for the READY message.
-     */
-    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error posting first control recv!");
-        return ret;
-    }
-
-    /*
-     * Deliver the control message that was requested.
-     */
-    ret = qemu_rdma_post_send_control(rdma, data, head);
-
-    if (ret < 0) {
-        fprintf(stderr, "Failed to send control buffer!\n");
-        return ret;
-    }
-
-    /*
-     * If we're expecting a response, block and wait for it.
-     */
-    if (resp) {
-        if (callback) {
-            DDPRINTF("Issuing callback before receiving response...\n");
-            ret = callback(rdma);
-            if (ret < 0) {
-                return ret;
-            }
-        }
-
-        DDPRINTF("Waiting for response %s\n", control_desc[resp->type]);
-        ret = qemu_rdma_exchange_get_response(rdma, resp,
-                                              resp->type, RDMA_WRID_DATA);
-
-        if (ret < 0) {
-            return ret;
-        }
-
-        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
-        if (resp_idx) {
-            *resp_idx = RDMA_WRID_DATA;
-        }
-        DDPRINTF("Response %s received.\n", control_desc[resp->type]);
-    }
-
-    rdma->control_ready_expected = 1;
-
-    return 0;
-}
-
-/*
- * This is an 'atomic' high-level operation to receive a single, unified
- * control-channel message.
- */
-static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
-                                int expecting)
-{
-    RDMAControlHeader ready = {
-                                .len = 0,
-                                .type = RDMA_CONTROL_READY,
-                                .repeat = 1,
-                              };
-    int ret;
-
-    /*
-     * Inform the source that we're ready to receive a message.
-     */
-    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);
-
-    if (ret < 0) {
-        fprintf(stderr, "Failed to send control buffer!\n");
-        return ret;
-    }
-
-    /*
-     * Block and wait for the message.
-     */
-    ret = qemu_rdma_exchange_get_response(rdma, head,
-                                          expecting, RDMA_WRID_READY);
-
-    if (ret < 0) {
-        return ret;
-    }
-
-    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);
-
-    /*
-     * Post a new RECV work request to replace the one we just consumed.
-     */
-    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error posting second control recv!");
-        return ret;
-    }
-
-    return 0;
-}
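-
-/*
- * Added summary (not part of the original source): one exchange pairs the
- * two helpers above. Roughly, with the source on the left and the
- * destination on the right:
- *
- *   qemu_rdma_exchange_send()            qemu_rdma_exchange_recv()
- *     wait for READY            <-----     post_send_control(READY)
- *     post_recv for resp (optional)
- *     post_recv for next READY
- *     post_send_control(head)   ----->     block for RECV_CONTROL
- *     block for resp (optional)
- */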
-
-/*
- * Write an actual chunk of memory using RDMA.
- *
- * If we're using dynamic registration on the dest-side, we have to
- * send a registration command first.
- */
-static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
-                               int current_index, uint64_t current_addr,
-                               uint64_t length)
-{
-    struct ibv_sge sge;
-    struct ibv_send_wr send_wr = { 0 };
-    struct ibv_send_wr *bad_wr;
-    int reg_result_idx, ret, count = 0;
-    uint64_t chunk, chunks;
-    uint8_t *chunk_start, *chunk_end;
-    RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
-    RDMARegister reg;
-    RDMARegisterResult *reg_result;
-    RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
-    RDMAControlHeader head = { .len = sizeof(RDMARegister),
-                               .type = RDMA_CONTROL_REGISTER_REQUEST,
-                               .repeat = 1,
-                             };
-
-retry:
-    sge.addr = (uint64_t)(block->local_host_addr +
-                            (current_addr - block->offset));
-    sge.length = length;
-
-    chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr);
-    chunk_start = ram_chunk_start(block, chunk);
-
-    if (block->is_ram_block) {
-        chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
-
-        if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
-            chunks--;
-        }
-    } else {
-        chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
-
-        if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
-            chunks--;
-        }
-    }
-
-    DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n",
-        chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
-
-    chunk_end = ram_chunk_end(block, chunk + chunks);
-
-    if (!rdma->pin_all) {
-#ifdef RDMA_UNREGISTRATION_EXAMPLE
-        qemu_rdma_unregister_waiting(rdma);
-#endif
-    }
-
-    while (test_bit(chunk, block->transit_bitmap)) {
-        (void)count;
-        DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64
-                " current %" PRIu64 " len %" PRIu64 " %d %d\n",
-                count++, current_index, chunk,
-                sge.addr, length, rdma->nb_sent, block->nb_chunks);
-
-        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
-
-        if (ret < 0) {
-            fprintf(stderr, "Failed to wait for previous write to complete "
-                    "block %d chunk %" PRIu64
-                    " current %" PRIu64 " len %" PRIu64 " %d\n",
-                    current_index, chunk, sge.addr, length, rdma->nb_sent);
-            return ret;
-        }
-    }
-
-    if (!rdma->pin_all || !block->is_ram_block) {
-        if (!block->remote_keys[chunk]) {
-            /*
-             * This chunk has not yet been registered, so first check to see
-             * if the entire chunk is zero. If so, tell the other side to
-             * memset() + madvise() the entire chunk without RDMA.
-             */
-
-            if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length)
-                   && buffer_find_nonzero_offset((void *)sge.addr,
-                                                    length) == length) {
-                RDMACompress comp = {
-                                        .offset = current_addr,
-                                        .value = 0,
-                                        .block_idx = current_index,
-                                        .length = length,
-                                    };
-
-                head.len = sizeof(comp);
-                head.type = RDMA_CONTROL_COMPRESS;
-
-                DDPRINTF("Entire chunk is zero, sending compress: %"
-                    PRIu64 " for %d "
-                    "bytes, index: %d, offset: %" PRId64 "...\n",
-                    chunk, sge.length, current_index, current_addr);
-
-                compress_to_network(&comp);
-                ret = qemu_rdma_exchange_send(rdma, &head,
-                                (uint8_t *) &comp, NULL, NULL, NULL);
-
-                if (ret < 0) {
-                    return -EIO;
-                }
-
-                acct_update_position(f, sge.length, true);
-
-                return 1;
-            }
-
-            /*
-             * Otherwise, tell other side to register.
-             */
-            reg.current_index = current_index;
-            if (block->is_ram_block) {
-                reg.key.current_addr = current_addr;
-            } else {
-                reg.key.chunk = chunk;
-            }
-            reg.chunks = chunks;
-
-            DDPRINTF("Sending registration request chunk %" PRIu64 " for %d "
-                    "bytes, index: %d, offset: %" PRId64 "...\n",
-                    chunk, sge.length, current_index, current_addr);
-
-            register_to_network(&reg);
-            ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
-                                    &resp, &reg_result_idx, NULL);
-            if (ret < 0) {
-                return ret;
-            }
-
-            /* try to overlap this single registration with the one we sent. */
-            if (qemu_rdma_register_and_get_keys(rdma, block,
-                                                (uint8_t *) sge.addr,
-                                                &sge.lkey, NULL, chunk,
-                                                chunk_start, chunk_end)) {
-                fprintf(stderr, "cannot get lkey!\n");
-                return -EINVAL;
-            }
-
-            reg_result = (RDMARegisterResult *)
-                    rdma->wr_data[reg_result_idx].control_curr;
-
-            network_to_result(reg_result);
-
-            DDPRINTF("Received registration result:"
-                    " my key: %x their key %x, chunk %" PRIu64 "\n",
-                    block->remote_keys[chunk], reg_result->rkey, chunk);
-
-            block->remote_keys[chunk] = reg_result->rkey;
-            block->remote_host_addr = reg_result->host_addr;
-        } else {
-            /* already registered before */
-            if (qemu_rdma_register_and_get_keys(rdma, block,
-                                                (uint8_t *)sge.addr,
-                                                &sge.lkey, NULL, chunk,
-                                                chunk_start, chunk_end)) {
-                fprintf(stderr, "cannot get lkey!\n");
-                return -EINVAL;
-            }
-        }
-
-        send_wr.wr.rdma.rkey = block->remote_keys[chunk];
-    } else {
-        send_wr.wr.rdma.rkey = block->remote_rkey;
-
-        if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
-                                                     &sge.lkey, NULL, chunk,
-                                                     chunk_start, chunk_end)) {
-            fprintf(stderr, "cannot get lkey!\n");
-            return -EINVAL;
-        }
-    }
-
-    /*
-     * Encode the ram block index and chunk within this wrid.
-     * We will use this information at the time of completion
-     * to figure out which bitmap to check against and then which
-     * chunk in the bitmap to look for.
-     */
-    send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
-                                        current_index, chunk);
-
-    send_wr.opcode = IBV_WR_RDMA_WRITE;
-    send_wr.send_flags = IBV_SEND_SIGNALED;
-    send_wr.sg_list = &sge;
-    send_wr.num_sge = 1;
-    send_wr.wr.rdma.remote_addr = block->remote_host_addr +
-                                (current_addr - block->offset);
-
-    DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx"
-              " remote: %lx, bytes %" PRIu32 "\n",
-              chunk, sge.addr, send_wr.wr.rdma.remote_addr,
-              sge.length);
-
-    /*
-     * ibv_post_send() does not return negative error numbers;
-     * per the specification they are positive - no idea why.
-     */
-    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
-
-    if (ret == ENOMEM) {
-        DDPRINTF("send queue is full. wait a little....\n");
-        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
-        if (ret < 0) {
-            fprintf(stderr, "rdma migration: failed to make "
-                            "room in full send queue! %d\n", ret);
-            return ret;
-        }
-
-        goto retry;
-
-    } else if (ret > 0) {
-        perror("rdma migration: post rdma write failed");
-        return -ret;
-    }
-
-    set_bit(chunk, block->transit_bitmap);
-    acct_update_position(f, sge.length, false);
-    rdma->total_writes++;
-
-    return 0;
-}
-
-/*
- * Push out any unwritten RDMA operations.
- *
- * We support sending out multiple chunks at the same time.
- * Not all of them need to get signaled in the completion queue.
- */
-static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
-{
-    int ret;
-
-    if (!rdma->current_length) {
-        return 0;
-    }
-
-    ret = qemu_rdma_write_one(f, rdma,
-            rdma->current_index, rdma->current_addr, rdma->current_length);
-
-    if (ret < 0) {
-        return ret;
-    }
-
-    if (ret == 0) {
-        rdma->nb_sent++;
-        DDDPRINTF("sent total: %d\n", rdma->nb_sent);
-    }
-
-    rdma->current_length = 0;
-    rdma->current_addr = 0;
-
-    return 0;
-}
-
-static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
-                    uint64_t offset, uint64_t len)
-{
-    RDMALocalBlock *block;
-    uint8_t *host_addr;
-    uint8_t *chunk_end;
-
-    if (rdma->current_index < 0) {
-        return 0;
-    }
-
-    if (rdma->current_chunk < 0) {
-        return 0;
-    }
-
-    block = &(rdma->local_ram_blocks.block[rdma->current_index]);
-    host_addr = block->local_host_addr + (offset - block->offset);
-    chunk_end = ram_chunk_end(block, rdma->current_chunk);
-
-    if (rdma->current_length == 0) {
-        return 0;
-    }
-
-    /*
-     * Only merge into chunk sequentially.
-     */
-    if (offset != (rdma->current_addr + rdma->current_length)) {
-        return 0;
-    }
-
-    if (offset < block->offset) {
-        return 0;
-    }
-
-    if ((offset + len) > (block->offset + block->length)) {
-        return 0;
-    }
-
-    if ((host_addr + len) > chunk_end) {
-        return 0;
-    }
-
-    return 1;
-}
-
-/*
- * We're not actually writing here, but doing three things:
- *
- * 1. Identify the chunk the buffer belongs to.
- * 2. If the chunk is full or the buffer doesn't belong to the current
- *    chunk, then start a new chunk and flush() the old chunk.
- * 3. To keep the hardware busy, we also group chunks into batches
- *    and only require that a batch gets acknowledged in the completion
- *    queue instead of each individual chunk.
- */
-static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
-                           uint64_t block_offset, uint64_t offset,
-                           uint64_t len)
-{
-    uint64_t current_addr = block_offset + offset;
-    uint64_t index = rdma->current_index;
-    uint64_t chunk = rdma->current_chunk;
-    int ret;
-
-    /* If we cannot merge it, we flush the current buffer first. */
-    if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
-        ret = qemu_rdma_write_flush(f, rdma);
-        if (ret) {
-            return ret;
-        }
-        rdma->current_length = 0;
-        rdma->current_addr = current_addr;
-
-        ret = qemu_rdma_search_ram_block(rdma, block_offset,
-                                         offset, len, &index, &chunk);
-        if (ret) {
-            fprintf(stderr, "ram block search failed\n");
-            return ret;
-        }
-        rdma->current_index = index;
-        rdma->current_chunk = chunk;
-    }
-
-    /* merge it */
-    rdma->current_length += len;
-
-    /* flush it if buffer is too large */
-    if (rdma->current_length >= RDMA_MERGE_MAX) {
-        return qemu_rdma_write_flush(f, rdma);
-    }
-
-    return 0;
-}
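-
-/*
- * Hypothetical trace (added for illustration; the offsets are invented):
- * with 4K pages that all fall inside a single chunk,
- *
- *   qemu_rdma_write(f, rdma, blk, 0x0000, 0x1000);  // starts a new buffer
- *   qemu_rdma_write(f, rdma, blk, 0x1000, 0x1000);  // contiguous: merges
- *   qemu_rdma_write(f, rdma, blk, 0x9000, 0x1000);  // gap: flushes first
- *
- * The third call fails qemu_rdma_buffer_mergable(), so the pending merged
- * 8K is pushed out via qemu_rdma_write_flush() before a new buffer starts.
- */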
-
-static void qemu_rdma_cleanup(RDMAContext *rdma)
-{
-    struct rdma_cm_event *cm_event;
-    int ret, idx;
-
-    if (rdma->cm_id && rdma->connected) {
-        if (rdma->error_state) {
-            RDMAControlHeader head = { .len = 0,
-                                       .type = RDMA_CONTROL_ERROR,
-                                       .repeat = 1,
-                                     };
-            fprintf(stderr, "Early error. Sending error.\n");
-            qemu_rdma_post_send_control(rdma, NULL, &head);
-        }
-
-        ret = rdma_disconnect(rdma->cm_id);
-        if (!ret) {
-            DDPRINTF("waiting for disconnect\n");
-            ret = rdma_get_cm_event(rdma->channel, &cm_event);
-            if (!ret) {
-                rdma_ack_cm_event(cm_event);
-            }
-        }
-        DDPRINTF("Disconnected.\n");
-        rdma->connected = false;
-    }
-
-    g_free(rdma->block);
-    rdma->block = NULL;
-
-    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
-        if (rdma->wr_data[idx].control_mr) {
-            rdma->total_registrations--;
-            ibv_dereg_mr(rdma->wr_data[idx].control_mr);
-        }
-        rdma->wr_data[idx].control_mr = NULL;
-    }
-
-    if (rdma->local_ram_blocks.block) {
-        while (rdma->local_ram_blocks.nb_blocks) {
-            __qemu_rdma_delete_block(rdma,
-                    rdma->local_ram_blocks.block->offset);
-        }
-    }
-
-    if (rdma->cq) {
-        ibv_destroy_cq(rdma->cq);
-        rdma->cq = NULL;
-    }
-    if (rdma->comp_channel) {
-        ibv_destroy_comp_channel(rdma->comp_channel);
-        rdma->comp_channel = NULL;
-    }
-    if (rdma->pd) {
-        ibv_dealloc_pd(rdma->pd);
-        rdma->pd = NULL;
-    }
-    if (rdma->listen_id) {
-        rdma_destroy_id(rdma->listen_id);
-        rdma->listen_id = NULL;
-    }
-    if (rdma->cm_id) {
-        if (rdma->qp) {
-            rdma_destroy_qp(rdma->cm_id);
-            rdma->qp = NULL;
-        }
-        rdma_destroy_id(rdma->cm_id);
-        rdma->cm_id = NULL;
-    }
-    if (rdma->channel) {
-        rdma_destroy_event_channel(rdma->channel);
-        rdma->channel = NULL;
-    }
-    g_free(rdma->host);
-    rdma->host = NULL;
-}
-
-static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all)
-{
-    int ret, idx;
-    Error *local_err = NULL, **temp = &local_err;
-
-    /*
-     * Will be validated against destination's actual capabilities
-     * after the connect() completes.
-     */
-    rdma->pin_all = pin_all;
-
-    ret = qemu_rdma_resolve_host(rdma, temp);
-    if (ret) {
-        goto err_rdma_source_init;
-    }
-
-    ret = qemu_rdma_alloc_pd_cq(rdma);
-    if (ret) {
-        ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
-                    " limits may be too low. Please check '$ ulimit -a' and"
-                    " search for 'ulimit -l' in the output");
-        goto err_rdma_source_init;
-    }
-
-    ret = qemu_rdma_alloc_qp(rdma);
-    if (ret) {
-        ERROR(temp, "rdma migration: error allocating qp!");
-        goto err_rdma_source_init;
-    }
-
-    ret = qemu_rdma_init_ram_blocks(rdma);
-    if (ret) {
-        ERROR(temp, "rdma migration: error initializing ram blocks!");
-        goto err_rdma_source_init;
-    }
-
-    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
-        ret = qemu_rdma_reg_control(rdma, idx);
-        if (ret) {
-            ERROR(temp, "rdma migration: error registering %d control!",
-                                                            idx);
-            goto err_rdma_source_init;
-        }
-    }
-
-    return 0;
-
-err_rdma_source_init:
-    error_propagate(errp, local_err);
-    qemu_rdma_cleanup(rdma);
-    return -1;
-}
-
-static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
-{
-    RDMACapabilities cap = {
-                                .version = RDMA_CONTROL_VERSION_CURRENT,
-                                .flags = 0,
-                           };
-    struct rdma_conn_param conn_param = { .initiator_depth = 2,
-                                          .retry_count = 5,
-                                          .private_data = &cap,
-                                          .private_data_len = sizeof(cap),
-                                        };
-    struct rdma_cm_event *cm_event;
-    int ret;
-
-    /*
-     * Only negotiate the capability with destination if the user
-     * on the source first requested the capability.
-     */
-    if (rdma->pin_all) {
-        DPRINTF("Server pin-all memory requested.\n");
-        cap.flags |= RDMA_CAPABILITY_PIN_ALL;
-    }
-
-    caps_to_network(&cap);
-
-    ret = rdma_connect(rdma->cm_id, &conn_param);
-    if (ret) {
-        perror("rdma_connect");
-        ERROR(errp, "connecting to destination!");
-        rdma_destroy_id(rdma->cm_id);
-        rdma->cm_id = NULL;
-        goto err_rdma_source_connect;
-    }
-
-    ret = rdma_get_cm_event(rdma->channel, &cm_event);
-    if (ret) {
-        perror("rdma_get_cm_event after rdma_connect");
-        ERROR(errp, "connecting to destination!");
-        rdma_ack_cm_event(cm_event);
-        rdma_destroy_id(rdma->cm_id);
-        rdma->cm_id = NULL;
-        goto err_rdma_source_connect;
-    }
-
-    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
-        perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
-        ERROR(errp, "connecting to destination!");
-        rdma_ack_cm_event(cm_event);
-        rdma_destroy_id(rdma->cm_id);
-        rdma->cm_id = NULL;
-        goto err_rdma_source_connect;
-    }
-    rdma->connected = true;
-
-    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
-    network_to_caps(&cap);
-
-    /*
-     * Verify that the *requested* capabilities are supported by the destination
-     * and disable them otherwise.
-     */
-    if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
-        ERROR(errp, "Server cannot support pinning all memory. "
-                        "Will register memory dynamically.");
-        rdma->pin_all = false;
-    }
-
-    DPRINTF("Pin all memory: %s\n", rdma->pin_all ? "enabled" : "disabled");
-
-    rdma_ack_cm_event(cm_event);
-
-    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
-    if (ret) {
-        ERROR(errp, "posting second control recv!");
-        goto err_rdma_source_connect;
-    }
-
-    rdma->control_ready_expected = 1;
-    rdma->nb_sent = 0;
-    return 0;
-
-err_rdma_source_connect:
-    qemu_rdma_cleanup(rdma);
-    return -1;
-}
-
-static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
-{
-    int ret = -EINVAL, idx;
-    struct rdma_cm_id *listen_id;
-    char ip[40] = "unknown";
-    struct rdma_addrinfo *res;
-    char port_str[16];
-
-    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
-        rdma->wr_data[idx].control_len = 0;
-        rdma->wr_data[idx].control_curr = NULL;
-    }
-
-    if (rdma->host == NULL) {
-        ERROR(errp, "RDMA host is not set!");
-        rdma->error_state = -EINVAL;
-        return -1;
-    }
-    /* create CM channel */
-    rdma->channel = rdma_create_event_channel();
-    if (!rdma->channel) {
-        ERROR(errp, "could not create rdma event channel");
-        rdma->error_state = -EINVAL;
-        return -1;
-    }
-
-    /* create CM id */
-    ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
-    if (ret) {
-        ERROR(errp, "could not create cm_id!");
-        goto err_dest_init_create_listen_id;
-    }
-
-    snprintf(port_str, 16, "%d", rdma->port);
-    port_str[15] = '\0';
-
-    if (rdma->host && strcmp("", rdma->host)) {
-        struct rdma_addrinfo *e;
-
-        ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
-        if (ret < 0) {
-            ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
-            goto err_dest_init_bind_addr;
-        }
-
-        for (e = res; e != NULL; e = e->ai_next) {
-            inet_ntop(e->ai_family,
-                &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
-            DPRINTF("Trying %s => %s\n", rdma->host, ip);
-            ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
-            if (!ret) {
-                if (e->ai_family == AF_INET6) {
-                    ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs);
-                    if (ret) {
-                        continue;
-                    }
-                }
-
-                goto listen;
-            }
-        }
-
-        ERROR(errp, "Error: could not rdma_bind_addr!");
-        goto err_dest_init_bind_addr;
-    } else {
-        ERROR(errp, "migration host and port not specified!");
-        ret = -EINVAL;
-        goto err_dest_init_bind_addr;
-    }
-listen:
-
-    rdma->listen_id = listen_id;
-    qemu_rdma_dump_gid("dest_init", listen_id);
-    return 0;
-
-err_dest_init_bind_addr:
-    rdma_destroy_id(listen_id);
-err_dest_init_create_listen_id:
-    rdma_destroy_event_channel(rdma->channel);
-    rdma->channel = NULL;
-    rdma->error_state = ret;
-    return ret;
-}
-
-static void *qemu_rdma_data_init(const char *host_port, Error **errp)
-{
-    RDMAContext *rdma = NULL;
-    InetSocketAddress *addr;
-
-    if (host_port) {
-        rdma = g_malloc0(sizeof(RDMAContext));
-        rdma->current_index = -1;
-        rdma->current_chunk = -1;
-
-        addr = inet_parse(host_port, NULL);
-        if (addr != NULL) {
-            rdma->port = atoi(addr->port);
-            rdma->host = g_strdup(addr->host);
-        } else {
-            ERROR(errp, "bad RDMA migration address '%s'", host_port);
-            g_free(rdma);
-            rdma = NULL;
-        }
-
-        qapi_free_InetSocketAddress(addr);
-    }
-
-    return rdma;
-}
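-
-/*
- * Usage sketch (illustrative; the address is made up): the host_port string
- * is the "host:port" part of the migration URI, parsed by inet_parse():
- *
- *   Error *err = NULL;
- *   RDMAContext *rdma = qemu_rdma_data_init("192.168.1.10:4444", &err);
- *   // on success: rdma->host == "192.168.1.10", rdma->port == 4444
- */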
-
-/*
- * QEMUFile interface to the control channel.
- * SEND messages for control only.
- * VM's ram is handled with regular RDMA messages.
- */
-static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
-                                int64_t pos, int size)
-{
-    QEMUFileRDMA *r = opaque;
-    QEMUFile *f = r->file;
-    RDMAContext *rdma = r->rdma;
-    size_t remaining = size;
-    uint8_t *data = (void *) buf;
-    int ret;
-
-    CHECK_ERROR_STATE();
-
-    /*
-     * Push out any writes that
-     * we've queued up for the VM's ram.
-     */
-    ret = qemu_rdma_write_flush(f, rdma);
-    if (ret < 0) {
-        rdma->error_state = ret;
-        return ret;
-    }
-
-    while (remaining) {
-        RDMAControlHeader head;
-
-        r->len = MIN(remaining, RDMA_SEND_INCREMENT);
-        remaining -= r->len;
-
-        head.len = r->len;
-        head.type = RDMA_CONTROL_QEMU_FILE;
-
-        ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
-
-        if (ret < 0) {
-            rdma->error_state = ret;
-            return ret;
-        }
-
-        data += r->len;
-    }
-
-    return size;
-}
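
A sketch of the framing the loop above produces, assuming RDMA_SEND_INCREMENT is the 32768-byte value defined earlier in this file, for a hypothetical 40000-byte write:

    /* SEND #1: RDMAControlHeader{ .len = 32768, .type = RDMA_CONTROL_QEMU_FILE }
     *          followed by 32768 payload bytes
     * SEND #2: RDMAControlHeader{ .len = 7232,  .type = RDMA_CONTROL_QEMU_FILE }
     *          followed by the remaining 7232 payload bytes
     */
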
-
-static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
-                             int size, int idx)
-{
-    size_t len = 0;
-
-    if (rdma->wr_data[idx].control_len) {
-        DDDPRINTF("RDMA %" PRId64 " of %d bytes already in buffer\n",
-                    rdma->wr_data[idx].control_len, size);
-
-        len = MIN(size, rdma->wr_data[idx].control_len);
-        memcpy(buf, rdma->wr_data[idx].control_curr, len);
-        rdma->wr_data[idx].control_curr += len;
-        rdma->wr_data[idx].control_len -= len;
-    }
-
-    return len;
-}
-
-/*
- * QEMUFile interface to the control channel.
- * RDMA links don't use bytestreams, so we have to
- * return bytes to QEMUFile opportunistically.
- */
-static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
-                                int64_t pos, int size)
-{
-    QEMUFileRDMA *r = opaque;
-    RDMAContext *rdma = r->rdma;
-    RDMAControlHeader head;
-    int ret = 0;
-
-    CHECK_ERROR_STATE();
-
-    /*
-     * First, we hold on to the last SEND message we
-     * were given and dish out the bytes until we run
-     * out of bytes.
-     */
-    r->len = qemu_rdma_fill(r->rdma, buf, size, 0);
-    if (r->len) {
-        return r->len;
-    }
-
-    /*
-     * Once we run out, we block and wait for another
-     * SEND message to arrive.
-     */
-    ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);
-
-    if (ret < 0) {
-        rdma->error_state = ret;
-        return ret;
-    }
-
-    /*
-     * SEND was received with new bytes, now try again.
-     */
-    return qemu_rdma_fill(r->rdma, buf, size, 0);
-}
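
Because the link is message-based rather than a bytestream, a single read may legitimately return fewer bytes than requested; QEMUFile simply calls again. A hypothetical trace:

    /* The peer sent one 1500-byte RDMA_CONTROL_QEMU_FILE message and we
     * ask for up to 4096 bytes. */
    qemu_rdma_get_buffer(r, buf, 0, 4096); /* returns 1500 (buffered SEND)  */
    qemu_rdma_get_buffer(r, buf, 0, 4096); /* blocks in exchange_recv()
                                              until the next SEND arrives   */
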
-
-/*
- * Block until all the outstanding chunks have been delivered by the hardware.
- */
-static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
-{
-    int ret;
-
-    if (qemu_rdma_write_flush(f, rdma) < 0) {
-        return -EIO;
-    }
-
-    while (rdma->nb_sent) {
-        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
-        if (ret < 0) {
-            fprintf(stderr, "rdma migration: complete polling error!\n");
-            return -EIO;
-        }
-    }
-
-    qemu_rdma_unregister_waiting(rdma);
-
-    return 0;
-}
-
-static int qemu_rdma_close(void *opaque)
-{
-    DPRINTF("Shutting down connection.\n");
-    QEMUFileRDMA *r = opaque;
-    if (r->rdma) {
-        qemu_rdma_cleanup(r->rdma);
-        g_free(r->rdma);
-    }
-    g_free(r);
-    return 0;
-}
-
-/*
- * Parameters:
- *    @offset == 0 :
- *        This means that 'block_offset' is a full virtual address that does not
- *        belong to a RAMBlock of the virtual machine and instead
- *        represents a private malloc'd memory area that the caller wishes to
- *        transfer.
- *
- *    @offset != 0 :
- *        Offset is an offset to be added to block_offset and used
- *        to also lookup the corresponding RAMBlock.
- *
- *    @size > 0 :
- *        Initiate a transfer of this size.
- *
- *    @size == 0 :
- *        A 'hint' or 'advice' that means that we wish to speculatively
- *        and asynchronously unregister this memory. In this case, there is no
- *        guarantee that the unregister will actually happen, for example,
- *        if the memory is being actively transmitted. Additionally, the memory
- *        may be re-registered at any future time if a write within the same
- *        chunk was requested again, even if you attempted to unregister it
- *        here.
- *
- *    @size < 0 : TODO, not yet supported
- *        Unregister the memory NOW. This means that the caller does not
- *        expect there to be any future RDMA transfers and we just want to clean
- *        things up. This is used in case the upper layer owns the memory and
- *        cannot wait for qemu_fclose() to occur.
- *
- *    @bytes_sent : User-specified pointer to indicate how many bytes were
- *                  sent. Usually, this will not be more than a few bytes of
- *                  the protocol because most transfers are sent asynchronously.
- */
-static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
-                                  ram_addr_t block_offset, ram_addr_t offset,
-                                  size_t size, int *bytes_sent)
-{
-    QEMUFileRDMA *rfile = opaque;
-    RDMAContext *rdma = rfile->rdma;
-    int ret;
-
-    CHECK_ERROR_STATE();
-
-    qemu_fflush(f);
-
-    if (size > 0) {
-        /*
-         * Add this page to the current 'chunk'. If the chunk
-         * is full, or the page doesn't belong to the current chunk,
-         * an actual RDMA write will occur and a new chunk will be formed.
-         */
-        ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
-        if (ret < 0) {
-            fprintf(stderr, "rdma migration: write error! %d\n", ret);
-            goto err;
-        }
-
-        /*
-         * We always return 1 byte because the RDMA
-         * protocol is completely asynchronous. We do not yet know
-         * whether an identified chunk is zero or not because we're
-         * waiting for other pages to potentially be merged with
-         * the current chunk. So, we have to call qemu_update_position()
-         * later on when the actual write occurs.
-         */
-        if (bytes_sent) {
-            *bytes_sent = 1;
-        }
-    } else {
-        uint64_t index, chunk;
-
-        /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
-        if (size < 0) {
-            ret = qemu_rdma_drain_cq(f, rdma);
-            if (ret < 0) {
-                fprintf(stderr, "rdma: failed to synchronously drain"
-                                " completion queue before unregistration.\n");
-                goto err;
-            }
-        }
-        */
-
-        ret = qemu_rdma_search_ram_block(rdma, block_offset,
-                                         offset, size, &index, &chunk);
-
-        if (ret) {
-            fprintf(stderr, "ram block search failed\n");
-            goto err;
-        }
-
-        qemu_rdma_signal_unregister(rdma, index, chunk, 0);
-
-        /*
-         * TODO: Synchronous, guaranteed unregistration (should not occur during
-         * fast-path). Otherwise, unregisters will process on the next call to
-         * qemu_rdma_drain_cq()
-        if (size < 0) {
-            qemu_rdma_unregister_waiting(rdma);
-        }
-        */
-    }
-
-    /*
-     * Drain the Completion Queue if possible, but do not block,
-     * just poll.
-     *
-     * If nothing to poll, the end of the iteration will do this
-     * again to make sure we don't overflow the request queue.
-     */
-    while (1) {
-        uint64_t wr_id, wr_id_in;
-        ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
-        if (ret < 0) {
-            fprintf(stderr, "rdma migration: polling error! %d\n", ret);
-            goto err;
-        }
-
-        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
-
-        if (wr_id == RDMA_WRID_NONE) {
-            break;
-        }
-    }
-
-    return RAM_SAVE_CONTROL_DELAYED;
-err:
-    rdma->error_state = ret;
-    return ret;
-}
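
A sketch of the calling convention documented above; f, rfile, block_offset and offset are hypothetical stand-ins for the caller's state:

    int bytes_sent;

    /* size > 0: queue one page into the current chunk; the actual RDMA
     * write happens later, when the chunk fills or a non-adjacent page
     * arrives. */
    qemu_rdma_save_page(f, rfile, block_offset, offset,
                        TARGET_PAGE_SIZE, &bytes_sent);

    /* size == 0: merely a hint -- request a speculative, asynchronous
     * unregistration of the chunk containing this page. It may be
     * ignored, and the chunk may be re-registered by a later write. */
    qemu_rdma_save_page(f, rfile, block_offset, offset, 0, NULL);
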
-
-static int qemu_rdma_accept(RDMAContext *rdma)
-{
-    RDMACapabilities cap;
-    struct rdma_conn_param conn_param = {
-                                            .responder_resources = 2,
-                                            .private_data = &cap,
-                                            .private_data_len = sizeof(cap),
-                                         };
-    struct rdma_cm_event *cm_event;
-    struct ibv_context *verbs;
-    int ret = -EINVAL;
-    int idx;
-
-    ret = rdma_get_cm_event(rdma->channel, &cm_event);
-    if (ret) {
-        goto err_rdma_dest_wait;
-    }
-
-    if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
-        rdma_ack_cm_event(cm_event);
-        goto err_rdma_dest_wait;
-    }
-
-    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
-
-    network_to_caps(&cap);
-
-    if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
-            fprintf(stderr, "Unknown source RDMA version: %d, bailing...\n",
-                            cap.version);
-            rdma_ack_cm_event(cm_event);
-            goto err_rdma_dest_wait;
-    }
-
-    /*
-     * Respond with only the capabilities this version of QEMU knows about.
-     */
-    cap.flags &= known_capabilities;
-
-    /*
-     * Enable the ones that we do know about.
-     * Add other checks here as new ones are introduced.
-     */
-    if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
-        rdma->pin_all = true;
-    }
-
-    rdma->cm_id = cm_event->id;
-    verbs = cm_event->id->verbs;
-
-    rdma_ack_cm_event(cm_event);
-
-    DPRINTF("Memory pin all: %s\n", rdma->pin_all ? "enabled" : "disabled");
-
-    caps_to_network(&cap);
-
-    DPRINTF("verbs context after listen: %p\n", verbs);
-
-    if (!rdma->verbs) {
-        rdma->verbs = verbs;
-    } else if (rdma->verbs != verbs) {
-            fprintf(stderr, "ibv context not matching %p, %p!\n",
-                    rdma->verbs, verbs);
-            goto err_rdma_dest_wait;
-    }
-
-    qemu_rdma_dump_id("dest_init", verbs);
-
-    ret = qemu_rdma_alloc_pd_cq(rdma);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error allocating pd and cq!\n");
-        goto err_rdma_dest_wait;
-    }
-
-    ret = qemu_rdma_alloc_qp(rdma);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error allocating qp!\n");
-        goto err_rdma_dest_wait;
-    }
-
-    ret = qemu_rdma_init_ram_blocks(rdma);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error initializing ram blocks!\n");
-        goto err_rdma_dest_wait;
-    }
-
-    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
-        ret = qemu_rdma_reg_control(rdma, idx);
-        if (ret) {
-            fprintf(stderr, "rdma: error registering %d control!\n", idx);
-            goto err_rdma_dest_wait;
-        }
-    }
-
-    qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL);
-
-    ret = rdma_accept(rdma->cm_id, &conn_param);
-    if (ret) {
-        fprintf(stderr, "rdma_accept returns %d!\n", ret);
-        goto err_rdma_dest_wait;
-    }
-
-    ret = rdma_get_cm_event(rdma->channel, &cm_event);
-    if (ret) {
-        fprintf(stderr, "rdma_accept get_cm_event failed %d!\n", ret);
-        goto err_rdma_dest_wait;
-    }
-
-    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
-        fprintf(stderr, "rdma_accept not event established!\n");
-        rdma_ack_cm_event(cm_event);
-        goto err_rdma_dest_wait;
-    }
-
-    rdma_ack_cm_event(cm_event);
-    rdma->connected = true;
-
-    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
-    if (ret) {
-        fprintf(stderr, "rdma migration: error posting second control recv!\n");
-        goto err_rdma_dest_wait;
-    }
-
-    qemu_rdma_dump_gid("dest_connect", rdma->cm_id);
-
-    return 0;
-
-err_rdma_dest_wait:
-    rdma->error_state = ret;
-    qemu_rdma_cleanup(rdma);
-    return ret;
-}
-
-/*
- * During each iteration of the migration, we listen for instructions
- * from the source VM to perform dynamic page registrations before it
- * can perform RDMA operations.
- *
- * We respond with the 'rkey'.
- *
- * Keep doing this until the source tells us to stop.
- */
-static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
-                                         uint64_t flags)
-{
-    RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
-                               .type = RDMA_CONTROL_REGISTER_RESULT,
-                               .repeat = 0,
-                             };
-    RDMAControlHeader unreg_resp = { .len = 0,
-                               .type = RDMA_CONTROL_UNREGISTER_FINISHED,
-                               .repeat = 0,
-                             };
-    RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
-                                 .repeat = 1 };
-    QEMUFileRDMA *rfile = opaque;
-    RDMAContext *rdma = rfile->rdma;
-    RDMALocalBlocks *local = &rdma->local_ram_blocks;
-    RDMAControlHeader head;
-    RDMARegister *reg, *registers;
-    RDMACompress *comp;
-    RDMARegisterResult *reg_result;
-    static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
-    RDMALocalBlock *block;
-    void *host_addr;
-    int ret = 0;
-    int idx = 0;
-    int count = 0;
-    int i = 0;
-
-    CHECK_ERROR_STATE();
-
-    do {
-        DDDPRINTF("Waiting for next request %" PRIu64 "...\n", flags);
-
-        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
-
-        if (ret < 0) {
-            break;
-        }
-
-        if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
-            fprintf(stderr, "rdma: Too many requests in this message (%d)."
-                            "Bailing.\n", head.repeat);
-            ret = -EIO;
-            break;
-        }
-
-        switch (head.type) {
-        case RDMA_CONTROL_COMPRESS:
-            comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
-            network_to_compress(comp);
-
-            DDPRINTF("Zapping zero chunk: %" PRId64
-                    " bytes, index %d, offset %" PRId64 "\n",
-                    comp->length, comp->block_idx, comp->offset);
-            block = &(rdma->local_ram_blocks.block[comp->block_idx]);
-
-            host_addr = block->local_host_addr +
-                            (comp->offset - block->offset);
-
-            ram_handle_compressed(host_addr, comp->value, comp->length);
-            break;
-
-        case RDMA_CONTROL_REGISTER_FINISHED:
-            DDDPRINTF("Current registrations complete.\n");
-            goto out;
-
-        case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
-            DPRINTF("Initial setup info requested.\n");
-
-            if (rdma->pin_all) {
-                ret = qemu_rdma_reg_whole_ram_blocks(rdma);
-                if (ret) {
-                    fprintf(stderr, "rdma migration: error dest "
-                                    "registering ram blocks!\n");
-                    goto out;
-                }
-            }
-
-            /*
-             * Dest uses this to prepare to transmit the RAMBlock descriptions
-             * to the source VM after connection setup.
-             * Both sides use the "remote" structure to communicate and update
-             * their "local" descriptions with what was sent.
-             */
-            for (i = 0; i < local->nb_blocks; i++) {
-                rdma->block[i].remote_host_addr =
-                    (uint64_t)(local->block[i].local_host_addr);
-
-                if (rdma->pin_all) {
-                    rdma->block[i].remote_rkey = local->block[i].mr->rkey;
-                }
-
-                rdma->block[i].offset = local->block[i].offset;
-                rdma->block[i].length = local->block[i].length;
-
-                remote_block_to_network(&rdma->block[i]);
-            }
-
-            blocks.len = rdma->local_ram_blocks.nb_blocks
-                                                * sizeof(RDMARemoteBlock);
-
-            ret = qemu_rdma_post_send_control(rdma,
-                                        (uint8_t *) rdma->block, &blocks);
-
-            if (ret < 0) {
-                fprintf(stderr, "rdma migration: error sending remote info!\n");
-                goto out;
-            }
-
-            break;
-        case RDMA_CONTROL_REGISTER_REQUEST:
-            DDPRINTF("There are %d registration requests\n", head.repeat);
-
-            reg_resp.repeat = head.repeat;
-            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
-
-            for (count = 0; count < head.repeat; count++) {
-                uint64_t chunk;
-                uint8_t *chunk_start, *chunk_end;
-
-                reg = &registers[count];
-                network_to_register(reg);
-
-                reg_result = &results[count];
-
-                DDPRINTF("Registration request (%d): index %d, current_addr %"
-                         PRIu64 " chunks: %" PRIu64 "\n", count,
-                         reg->current_index, reg->key.current_addr, reg->chunks);
-
-                block = &(rdma->local_ram_blocks.block[reg->current_index]);
-                if (block->is_ram_block) {
-                    host_addr = (block->local_host_addr +
-                                (reg->key.current_addr - block->offset));
-                    chunk = ram_chunk_index(block->local_host_addr,
-                                            (uint8_t *) host_addr);
-                } else {
-                    chunk = reg->key.chunk;
-                    host_addr = block->local_host_addr +
-                        (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
-                }
-                chunk_start = ram_chunk_start(block, chunk);
-                chunk_end = ram_chunk_end(block, chunk + reg->chunks);
-                if (qemu_rdma_register_and_get_keys(rdma, block,
-                            (uint8_t *)host_addr, NULL, &reg_result->rkey,
-                            chunk, chunk_start, chunk_end)) {
-                    fprintf(stderr, "cannot get rkey!\n");
-                    ret = -EINVAL;
-                    goto out;
-                }
-
-                reg_result->host_addr = (uint64_t) block->local_host_addr;
-
-                DDPRINTF("Registered rkey for this request: %x\n",
-                                reg_result->rkey);
-
-                result_to_network(reg_result);
-            }
-
-            ret = qemu_rdma_post_send_control(rdma,
-                            (uint8_t *) results, &reg_resp);
-
-            if (ret < 0) {
-                fprintf(stderr, "Failed to send control buffer!\n");
-                goto out;
-            }
-            break;
-        case RDMA_CONTROL_UNREGISTER_REQUEST:
-            DDPRINTF("There are %d unregistration requests\n", head.repeat);
-            unreg_resp.repeat = head.repeat;
-            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
-
-            for (count = 0; count < head.repeat; count++) {
-                reg = &registers[count];
-                network_to_register(reg);
-
-                DDPRINTF("Unregistration request (%d): "
-                         " index %d, chunk %" PRIu64 "\n",
-                         count, reg->current_index, reg->key.chunk);
-
-                block = &(rdma->local_ram_blocks.block[reg->current_index]);
-
-                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
-                block->pmr[reg->key.chunk] = NULL;
-
-                if (ret != 0) {
-                    perror("rdma unregistration chunk failed");
-                    ret = -ret;
-                    goto out;
-                }
-
-                rdma->total_registrations--;
-
-                DDPRINTF("Unregistered chunk %" PRIu64 " successfully.\n",
-                            reg->key.chunk);
-            }
-
-            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);
-
-            if (ret < 0) {
-                fprintf(stderr, "Failed to send control buffer!\n");
-                goto out;
-            }
-            break;
-        case RDMA_CONTROL_REGISTER_RESULT:
-            fprintf(stderr, "Invalid RESULT message at dest.\n");
-            ret = -EIO;
-            goto out;
-        default:
-            fprintf(stderr, "Unknown control message %s\n",
-                                control_desc[head.type]);
-            ret = -EIO;
-            goto out;
-        }
-    } while (1);
-out:
-    if (ret < 0) {
-        rdma->error_state = ret;
-    }
-    return ret;
-}
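
A sketch of one round of the conversation this handler implements, seen from the destination; the message names are the RDMA_CONTROL_* constants used above:

    /*
     *   source                            destination (this handler)
     *   ------                            --------------------------
     *   REGISTER_REQUEST(index, chunk) -> ibv_reg_mr() via
     *                                     qemu_rdma_register_and_get_keys()
     *                                  <- REGISTER_RESULT(rkey)
     *   RDMA WRITE using that rkey     -> (completed by the HCA, no CPU)
     *   REGISTER_FINISHED              -> handler returns to its caller
     */
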
-
-static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
-                                        uint64_t flags)
-{
-    QEMUFileRDMA *rfile = opaque;
-    RDMAContext *rdma = rfile->rdma;
-
-    CHECK_ERROR_STATE();
-
-    DDDPRINTF("start section: %" PRIu64 "\n", flags);
-    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
-    qemu_fflush(f);
-
-    return 0;
-}
-
-/*
- * Inform dest that dynamic registrations are done for now.
- * First, flush writes, if any.
- */
-static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
-                                       uint64_t flags)
-{
-    Error *local_err = NULL, **errp = &local_err;
-    QEMUFileRDMA *rfile = opaque;
-    RDMAContext *rdma = rfile->rdma;
-    RDMAControlHeader head = { .len = 0, .repeat = 1 };
-    int ret = 0;
-
-    CHECK_ERROR_STATE();
-
-    qemu_fflush(f);
-    ret = qemu_rdma_drain_cq(f, rdma);
-
-    if (ret < 0) {
-        goto err;
-    }
-
-    if (flags == RAM_CONTROL_SETUP) {
-        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
-        RDMALocalBlocks *local = &rdma->local_ram_blocks;
-        int reg_result_idx, i, j, nb_remote_blocks;
-
-        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
-        DPRINTF("Sending registration setup for ram blocks...\n");
-
-        /*
-         * Make sure that we parallelize the pinning on both sides.
-         * For very large guests, doing this serially takes a really
-         * long time, so we have to 'interleave' the pinning locally
-         * with the control messages by performing the pinning on this
-         * side before we receive the control response from the other
-         * side that the pinning has completed.
-         */
-        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
-                    &reg_result_idx, rdma->pin_all ?
-                    qemu_rdma_reg_whole_ram_blocks : NULL);
-        if (ret < 0) {
-            ERROR(errp, "receiving remote info!");
-            return ret;
-        }
-
-        nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock);
-
-        /*
-         * The protocol uses two different sets of rkeys (mutually exclusive):
-         * 1. One key to represent the virtual address of the entire ram block.
-         *    (dynamic chunk registration disabled - pin everything with one rkey.)
-         * 2. One to represent individual chunks within a ram block.
-         *    (dynamic chunk registration enabled - pin individual chunks.)
-         *
-         * Once the capability is successfully negotiated, the destination transmits
-         * the keys to use (or sends them later) including the virtual addresses
- *         and then propagates the remote ram block descriptions to its local copy.
-         */
-
-        if (local->nb_blocks != nb_remote_blocks) {
-            ERROR(errp, "ram blocks mismatch #1! "
-                        "Your QEMU command line parameters are probably "
-                        "not identical on both the source and destination.");
-            return -EINVAL;
-        }
-
-        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
-        memcpy(rdma->block,
-            rdma->wr_data[reg_result_idx].control_curr, resp.len);
-        for (i = 0; i < nb_remote_blocks; i++) {
-            network_to_remote_block(&rdma->block[i]);
-
-            /* search local ram blocks */
-            for (j = 0; j < local->nb_blocks; j++) {
-                if (rdma->block[i].offset != local->block[j].offset) {
-                    continue;
-                }
-
-                if (rdma->block[i].length != local->block[j].length) {
-                    ERROR(errp, "ram blocks mismatch #2! "
-                        "Your QEMU command line parameters are probably "
-                        "not identical on both the source and destination.");
-                    return -EINVAL;
-                }
-                local->block[j].remote_host_addr =
-                        rdma->block[i].remote_host_addr;
-                local->block[j].remote_rkey = rdma->block[i].remote_rkey;
-                break;
-            }
-
-            if (j >= local->nb_blocks) {
-                ERROR(errp, "ram blocks mismatch #3! "
-                        "Your QEMU command line parameters are probably "
-                        "not identical on both the source and destination.");
-                return -EINVAL;
-            }
-        }
-    }
-
-    DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags);
-
-    head.type = RDMA_CONTROL_REGISTER_FINISHED;
-    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
-
-    if (ret < 0) {
-        goto err;
-    }
-
-    return 0;
-err:
-    rdma->error_state = ret;
-    return ret;
-}
-
-static int qemu_rdma_get_fd(void *opaque)
-{
-    QEMUFileRDMA *rfile = opaque;
-    RDMAContext *rdma = rfile->rdma;
-
-    return rdma->comp_channel->fd;
-}
-
-const QEMUFileOps rdma_read_ops = {
-    .get_buffer    = qemu_rdma_get_buffer,
-    .get_fd        = qemu_rdma_get_fd,
-    .close         = qemu_rdma_close,
-    .hook_ram_load = qemu_rdma_registration_handle,
-};
-
-const QEMUFileOps rdma_write_ops = {
-    .put_buffer         = qemu_rdma_put_buffer,
-    .close              = qemu_rdma_close,
-    .before_ram_iterate = qemu_rdma_registration_start,
-    .after_ram_iterate  = qemu_rdma_registration_stop,
-    .save_page          = qemu_rdma_save_page,
-};
-
-static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
-{
-    QEMUFileRDMA *r = g_malloc0(sizeof(QEMUFileRDMA));
-
-    if (qemu_file_mode_is_not_valid(mode)) {
-        return NULL;
-    }
-
-    r->rdma = rdma;
-
-    if (mode[0] == 'w') {
-        r->file = qemu_fopen_ops(r, &rdma_write_ops);
-    } else {
-        r->file = qemu_fopen_ops(r, &rdma_read_ops);
-    }
-
-    return r->file;
-}
-
-static void rdma_accept_incoming_migration(void *opaque)
-{
-    RDMAContext *rdma = opaque;
-    int ret;
-    QEMUFile *f;
-    Error *local_err = NULL, **errp = &local_err;
-
-    DPRINTF("Accepting rdma connection...\n");
-    ret = qemu_rdma_accept(rdma);
-
-    if (ret) {
-        ERROR(errp, "RDMA Migration initialization failed!");
-        return;
-    }
-
-    DPRINTF("Accepted migration\n");
-
-    f = qemu_fopen_rdma(rdma, "rb");
-    if (f == NULL) {
-        ERROR(errp, "could not qemu_fopen_rdma!");
-        qemu_rdma_cleanup(rdma);
-        return;
-    }
-
-    rdma->migration_started_on_destination = 1;
-    process_incoming_migration(f);
-}
-
-void rdma_start_incoming_migration(const char *host_port, Error **errp)
-{
-    int ret;
-    RDMAContext *rdma;
-    Error *local_err = NULL;
-
-    DPRINTF("Starting RDMA-based incoming migration\n");
-    rdma = qemu_rdma_data_init(host_port, &local_err);
-
-    if (rdma == NULL) {
-        goto err;
-    }
-
-    ret = qemu_rdma_dest_init(rdma, &local_err);
-
-    if (ret) {
-        goto err;
-    }
-
-    DPRINTF("qemu_rdma_dest_init success\n");
-
-    ret = rdma_listen(rdma->listen_id, 5);
-
-    if (ret) {
-        ERROR(errp, "listening on socket!");
-        goto err;
-    }
-
-    DPRINTF("rdma_listen success\n");
-
-    qemu_set_fd_handler2(rdma->channel->fd, NULL,
-                         rdma_accept_incoming_migration, NULL,
-                            (void *)(intptr_t) rdma);
-    return;
-err:
-    error_propagate(errp, local_err);
-    g_free(rdma);
-}
-
-void rdma_start_outgoing_migration(void *opaque,
-                            const char *host_port, Error **errp)
-{
-    MigrationState *s = opaque;
-    Error *local_err = NULL, **temp = &local_err;
-    RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
-    int ret = 0;
-
-    if (rdma == NULL) {
-        ERROR(temp, "Failed to initialize RDMA data structures!");
-        goto err;
-    }
-
-    ret = qemu_rdma_source_init(rdma, &local_err,
-        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
-
-    if (ret) {
-        goto err;
-    }
-
-    DPRINTF("qemu_rdma_source_init success\n");
-    ret = qemu_rdma_connect(rdma, &local_err);
-
-    if (ret) {
-        goto err;
-    }
-
-    DPRINTF("qemu_rdma_source_connect success\n");
-
-    s->file = qemu_fopen_rdma(rdma, "wb");
-    migrate_fd_connect(s);
-    return;
-err:
-    error_propagate(errp, local_err);
-    g_free(rdma);
-    migrate_fd_error(s);
-}
diff --git a/migration-tcp.c b/migration-tcp.c
deleted file mode 100644 (file)
index 91c9cf3..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * QEMU live migration
- *
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- *  Anthony Liguori   <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include <string.h>
-
-#include "qemu-common.h"
-#include "qemu/error-report.h"
-#include "qemu/sockets.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "block/block.h"
-#include "qemu/main-loop.h"
-
-//#define DEBUG_MIGRATION_TCP
-
-#ifdef DEBUG_MIGRATION_TCP
-#define DPRINTF(fmt, ...) \
-    do { printf("migration-tcp: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-static void tcp_wait_for_connect(int fd, Error *err, void *opaque)
-{
-    MigrationState *s = opaque;
-
-    if (fd < 0) {
-        DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
-        s->file = NULL;
-        migrate_fd_error(s);
-    } else {
-        DPRINTF("migrate connect success\n");
-        s->file = qemu_fopen_socket(fd, "wb");
-        migrate_fd_connect(s);
-    }
-}
-
-void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp)
-{
-    inet_nonblocking_connect(host_port, tcp_wait_for_connect, s, errp);
-}
-
-static void tcp_accept_incoming_migration(void *opaque)
-{
-    struct sockaddr_in addr;
-    socklen_t addrlen = sizeof(addr);
-    int s = (intptr_t)opaque;
-    QEMUFile *f;
-    int c, err;
-
-    do {
-        c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
-        err = socket_error();
-    } while (c < 0 && err == EINTR);
-    qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
-    closesocket(s);
-
-    DPRINTF("accepted migration\n");
-
-    if (c < 0) {
-        error_report("could not accept migration connection (%s)",
-                     strerror(err));
-        return;
-    }
-
-    f = qemu_fopen_socket(c, "rb");
-    if (f == NULL) {
-        error_report("could not qemu_fopen socket");
-        goto out;
-    }
-
-    process_incoming_migration(f);
-    return;
-
-out:
-    closesocket(c);
-}
-
-void tcp_start_incoming_migration(const char *host_port, Error **errp)
-{
-    int s;
-
-    s = inet_listen(host_port, NULL, 256, SOCK_STREAM, 0, errp);
-    if (s < 0) {
-        return;
-    }
-
-    qemu_set_fd_handler2(s, NULL, tcp_accept_incoming_migration, NULL,
-                         (void *)(intptr_t)s);
-}
diff --git a/migration-unix.c b/migration-unix.c
deleted file mode 100644 (file)
index 1cdadfb..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * QEMU live migration via Unix Domain Sockets
- *
- * Copyright Red Hat, Inc. 2009
- *
- * Authors:
- *  Chris Lalancette <clalance@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include <string.h>
-
-#include "qemu-common.h"
-#include "qemu/error-report.h"
-#include "qemu/sockets.h"
-#include "qemu/main-loop.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "block/block.h"
-
-//#define DEBUG_MIGRATION_UNIX
-
-#ifdef DEBUG_MIGRATION_UNIX
-#define DPRINTF(fmt, ...) \
-    do { printf("migration-unix: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-static void unix_wait_for_connect(int fd, Error *err, void *opaque)
-{
-    MigrationState *s = opaque;
-
-    if (fd < 0) {
-        DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
-        s->file = NULL;
-        migrate_fd_error(s);
-    } else {
-        DPRINTF("migrate connect success\n");
-        s->file = qemu_fopen_socket(fd, "wb");
-        migrate_fd_connect(s);
-    }
-}
-
-void unix_start_outgoing_migration(MigrationState *s, const char *path, Error **errp)
-{
-    unix_nonblocking_connect(path, unix_wait_for_connect, s, errp);
-}
-
-static void unix_accept_incoming_migration(void *opaque)
-{
-    struct sockaddr_un addr;
-    socklen_t addrlen = sizeof(addr);
-    int s = (intptr_t)opaque;
-    QEMUFile *f;
-    int c, err;
-
-    do {
-        c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
-        err = errno;
-    } while (c < 0 && err == EINTR);
-    qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
-    close(s);
-
-    DPRINTF("accepted migration\n");
-
-    if (c < 0) {
-        error_report("could not accept migration connection (%s)",
-                     strerror(err));
-        return;
-    }
-
-    f = qemu_fopen_socket(c, "rb");
-    if (f == NULL) {
-        error_report("could not qemu_fopen socket");
-        goto out;
-    }
-
-    process_incoming_migration(f);
-    return;
-
-out:
-    close(c);
-}
-
-void unix_start_incoming_migration(const char *path, Error **errp)
-{
-    int s;
-
-    s = unix_listen(path, NULL, 0, errp);
-    if (s < 0) {
-        return;
-    }
-
-    qemu_set_fd_handler2(s, NULL, unix_accept_incoming_migration, NULL,
-                         (void *)(intptr_t)s);
-}
diff --git a/migration.c b/migration.c
deleted file mode 100644 (file)
index c49a05a..0000000
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * QEMU live migration
- *
- * Copyright IBM, Corp. 2008
- *
- * Authors:
- *  Anthony Liguori   <aliguori@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- * Contributions after 2012-01-13 are licensed under the terms of the
- * GNU GPL, version 2 or (at your option) any later version.
- */
-
-#include "qemu-common.h"
-#include "qemu/main-loop.h"
-#include "migration/migration.h"
-#include "monitor/monitor.h"
-#include "migration/qemu-file.h"
-#include "sysemu/sysemu.h"
-#include "block/block.h"
-#include "qemu/sockets.h"
-#include "migration/block.h"
-#include "qemu/thread.h"
-#include "qmp-commands.h"
-#include "trace.h"
-
-enum {
-    MIG_STATE_ERROR = -1,
-    MIG_STATE_NONE,
-    MIG_STATE_SETUP,
-    MIG_STATE_CANCELLING,
-    MIG_STATE_CANCELLED,
-    MIG_STATE_ACTIVE,
-    MIG_STATE_COMPLETED,
-};
-
-#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
-
-/* Amount of time to allocate to each "chunk" of bandwidth-throttled
- * data. */
-#define BUFFER_DELAY     100
-#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
-
-/* Migration XBZRLE default cache size */
-#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
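
As a worked example of how the two throttling constants combine (the rate limiter is armed with bandwidth_limit / XFER_LIMIT_RATIO bytes per window; see migrate_fd_connect() below):

    /* With the defaults:
     *   MAX_THROTTLE / XFER_LIMIT_RATIO = (32 << 20) / (1000 / 100)
     *                                   = 33554432 / 10
     *                                   = 3355443 bytes (~3.2 MiB) per 100 ms
     * window, i.e. the advertised 32 MiB/s. */
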
-
-static NotifierList migration_state_notifiers =
-    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
-
-/* When we add fault tolerance, we could have several
-   migrations at once.  For now we don't need to add
-   dynamic creation of migration state. */
-
-MigrationState *migrate_get_current(void)
-{
-    static MigrationState current_migration = {
-        .state = MIG_STATE_NONE,
-        .bandwidth_limit = MAX_THROTTLE,
-        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
-        .mbps = -1,
-    };
-
-    return &current_migration;
-}
-
-void qemu_start_incoming_migration(const char *uri, Error **errp)
-{
-    const char *p;
-
-    if (strstart(uri, "tcp:", &p))
-        tcp_start_incoming_migration(p, errp);
-#ifdef CONFIG_RDMA
-    else if (strstart(uri, "rdma:", &p))
-        rdma_start_incoming_migration(p, errp);
-#endif
-#if !defined(WIN32)
-    else if (strstart(uri, "exec:", &p))
-        exec_start_incoming_migration(p, errp);
-    else if (strstart(uri, "unix:", &p))
-        unix_start_incoming_migration(p, errp);
-    else if (strstart(uri, "fd:", &p))
-        fd_start_incoming_migration(p, errp);
-#endif
-    else {
-        error_setg(errp, "unknown migration protocol: %s", uri);
-    }
-}
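
For illustration, how a hypothetical "tcp:0:4444" URI flows through the dispatch above:

    const char *p;
    Error *local_err = NULL;

    /* strstart() matches the "tcp:" scheme and leaves p pointing at
     * "0:4444", which tcp_start_incoming_migration() hands straight to
     * inet_listen(). */
    if (strstart("tcp:0:4444", "tcp:", &p)) {
        tcp_start_incoming_migration(p, &local_err); /* p == "0:4444" */
    }
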
-
-static void process_incoming_migration_co(void *opaque)
-{
-    QEMUFile *f = opaque;
-    Error *local_err = NULL;
-    int ret;
-
-    ret = qemu_loadvm_state(f);
-    qemu_fclose(f);
-    free_xbzrle_decoded_buf();
-    if (ret < 0) {
-        error_report("load of migration failed: %s", strerror(-ret));
-        exit(EXIT_FAILURE);
-    }
-    qemu_announce_self();
-
-    /* Make sure all file formats flush their mutable metadata */
-    bdrv_invalidate_cache_all(&local_err);
-    if (local_err) {
-        qerror_report_err(local_err);
-        error_free(local_err);
-        exit(EXIT_FAILURE);
-    }
-
-    if (autostart) {
-        vm_start();
-    } else {
-        runstate_set(RUN_STATE_PAUSED);
-    }
-}
-
-void process_incoming_migration(QEMUFile *f)
-{
-    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
-    int fd = qemu_get_fd(f);
-
-    assert(fd != -1);
-    qemu_set_nonblock(fd);
-    qemu_coroutine_enter(co, f);
-}
-
-/* Maximum number of nanoseconds we are willing to have the guest paused at
- * the end of migration (the downtime). Nanoseconds are used because it is
- * the maximum resolution that get_clock() can achieve. It is an internal
- * measure. All user-visible units must be in seconds. */
-static uint64_t max_downtime = 300000000;
-
-uint64_t migrate_max_downtime(void)
-{
-    return max_downtime;
-}
-
-MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
-{
-    MigrationCapabilityStatusList *head = NULL;
-    MigrationCapabilityStatusList *caps;
-    MigrationState *s = migrate_get_current();
-    int i;
-
-    caps = NULL; /* silence compiler warning */
-    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
-        if (head == NULL) {
-            head = g_malloc0(sizeof(*caps));
-            caps = head;
-        } else {
-            caps->next = g_malloc0(sizeof(*caps));
-            caps = caps->next;
-        }
-        caps->value =
-            g_malloc(sizeof(*caps->value));
-        caps->value->capability = i;
-        caps->value->state = s->enabled_capabilities[i];
-    }
-
-    return head;
-}
-
-static void get_xbzrle_cache_stats(MigrationInfo *info)
-{
-    if (migrate_use_xbzrle()) {
-        info->has_xbzrle_cache = true;
-        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
-        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
-        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
-        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
-        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
-        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
-        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
-    }
-}
-
-MigrationInfo *qmp_query_migrate(Error **errp)
-{
-    MigrationInfo *info = g_malloc0(sizeof(*info));
-    MigrationState *s = migrate_get_current();
-
-    switch (s->state) {
-    case MIG_STATE_NONE:
-        /* no migration has happened ever */
-        break;
-    case MIG_STATE_SETUP:
-        info->has_status = true;
-        info->status = g_strdup("setup");
-        info->has_total_time = false;
-        break;
-    case MIG_STATE_ACTIVE:
-    case MIG_STATE_CANCELLING:
-        info->has_status = true;
-        info->status = g_strdup("active");
-        info->has_total_time = true;
-        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
-            - s->total_time;
-        info->has_expected_downtime = true;
-        info->expected_downtime = s->expected_downtime;
-        info->has_setup_time = true;
-        info->setup_time = s->setup_time;
-
-        info->has_ram = true;
-        info->ram = g_malloc0(sizeof(*info->ram));
-        info->ram->transferred = ram_bytes_transferred();
-        info->ram->remaining = ram_bytes_remaining();
-        info->ram->total = ram_bytes_total();
-        info->ram->duplicate = dup_mig_pages_transferred();
-        info->ram->skipped = skipped_mig_pages_transferred();
-        info->ram->normal = norm_mig_pages_transferred();
-        info->ram->normal_bytes = norm_mig_bytes_transferred();
-        info->ram->dirty_pages_rate = s->dirty_pages_rate;
-        info->ram->mbps = s->mbps;
-        info->ram->dirty_sync_count = s->dirty_sync_count;
-
-        if (blk_mig_active()) {
-            info->has_disk = true;
-            info->disk = g_malloc0(sizeof(*info->disk));
-            info->disk->transferred = blk_mig_bytes_transferred();
-            info->disk->remaining = blk_mig_bytes_remaining();
-            info->disk->total = blk_mig_bytes_total();
-        }
-
-        get_xbzrle_cache_stats(info);
-        break;
-    case MIG_STATE_COMPLETED:
-        get_xbzrle_cache_stats(info);
-
-        info->has_status = true;
-        info->status = g_strdup("completed");
-        info->has_total_time = true;
-        info->total_time = s->total_time;
-        info->has_downtime = true;
-        info->downtime = s->downtime;
-        info->has_setup_time = true;
-        info->setup_time = s->setup_time;
-
-        info->has_ram = true;
-        info->ram = g_malloc0(sizeof(*info->ram));
-        info->ram->transferred = ram_bytes_transferred();
-        info->ram->remaining = 0;
-        info->ram->total = ram_bytes_total();
-        info->ram->duplicate = dup_mig_pages_transferred();
-        info->ram->skipped = skipped_mig_pages_transferred();
-        info->ram->normal = norm_mig_pages_transferred();
-        info->ram->normal_bytes = norm_mig_bytes_transferred();
-        info->ram->mbps = s->mbps;
-        info->ram->dirty_sync_count = s->dirty_sync_count;
-        break;
-    case MIG_STATE_ERROR:
-        info->has_status = true;
-        info->status = g_strdup("failed");
-        break;
-    case MIG_STATE_CANCELLED:
-        info->has_status = true;
-        info->status = g_strdup("cancelled");
-        break;
-    }
-
-    return info;
-}
-
-void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
-                                  Error **errp)
-{
-    MigrationState *s = migrate_get_current();
-    MigrationCapabilityStatusList *cap;
-
-    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
-        error_set(errp, QERR_MIGRATION_ACTIVE);
-        return;
-    }
-
-    for (cap = params; cap; cap = cap->next) {
-        s->enabled_capabilities[cap->value->capability] = cap->value->state;
-    }
-}
-
-/* shared migration helpers */
-
-static void migrate_set_state(MigrationState *s, int old_state, int new_state)
-{
-    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
-        trace_migrate_set_state(new_state);
-    }
-}
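
For reference, a sketch of the compare-and-swap contract the helper above relies on: atomic_cmpxchg() atomically performs the following and returns the value that was previously stored, so the transition succeeded exactly when that value equals old_state:

    /* prev = s->state;
     * if (prev == old_state) {
     *     s->state = new_state;
     * }
     * return prev;
     */
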
-
-static void migrate_fd_cleanup(void *opaque)
-{
-    MigrationState *s = opaque;
-
-    qemu_bh_delete(s->cleanup_bh);
-    s->cleanup_bh = NULL;
-
-    if (s->file) {
-        trace_migrate_fd_cleanup();
-        qemu_mutex_unlock_iothread();
-        qemu_thread_join(&s->thread);
-        qemu_mutex_lock_iothread();
-
-        qemu_fclose(s->file);
-        s->file = NULL;
-    }
-
-    assert(s->state != MIG_STATE_ACTIVE);
-
-    if (s->state != MIG_STATE_COMPLETED) {
-        qemu_savevm_state_cancel();
-        if (s->state == MIG_STATE_CANCELLING) {
-            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
-        }
-    }
-
-    notifier_list_notify(&migration_state_notifiers, s);
-}
-
-void migrate_fd_error(MigrationState *s)
-{
-    trace_migrate_fd_error();
-    assert(s->file == NULL);
-    s->state = MIG_STATE_ERROR;
-    trace_migrate_set_state(MIG_STATE_ERROR);
-    notifier_list_notify(&migration_state_notifiers, s);
-}
-
-static void migrate_fd_cancel(MigrationState *s)
-{
-    int old_state;
-    trace_migrate_fd_cancel();
-
-    do {
-        old_state = s->state;
-        if (old_state != MIG_STATE_SETUP && old_state != MIG_STATE_ACTIVE) {
-            break;
-        }
-        migrate_set_state(s, old_state, MIG_STATE_CANCELLING);
-    } while (s->state != MIG_STATE_CANCELLING);
-}
-
-void add_migration_state_change_notifier(Notifier *notify)
-{
-    notifier_list_add(&migration_state_notifiers, notify);
-}
-
-void remove_migration_state_change_notifier(Notifier *notify)
-{
-    notifier_remove(notify);
-}
-
-bool migration_in_setup(MigrationState *s)
-{
-    return s->state == MIG_STATE_SETUP;
-}
-
-bool migration_has_finished(MigrationState *s)
-{
-    return s->state == MIG_STATE_COMPLETED;
-}
-
-bool migration_has_failed(MigrationState *s)
-{
-    return (s->state == MIG_STATE_CANCELLED ||
-            s->state == MIG_STATE_ERROR);
-}
-
-static MigrationState *migrate_init(const MigrationParams *params)
-{
-    MigrationState *s = migrate_get_current();
-    int64_t bandwidth_limit = s->bandwidth_limit;
-    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
-    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
-
-    memcpy(enabled_capabilities, s->enabled_capabilities,
-           sizeof(enabled_capabilities));
-
-    memset(s, 0, sizeof(*s));
-    s->params = *params;
-    memcpy(s->enabled_capabilities, enabled_capabilities,
-           sizeof(enabled_capabilities));
-    s->xbzrle_cache_size = xbzrle_cache_size;
-
-    s->bandwidth_limit = bandwidth_limit;
-    s->state = MIG_STATE_SETUP;
-    trace_migrate_set_state(MIG_STATE_SETUP);
-
-    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-    return s;
-}
-
-static GSList *migration_blockers;
-
-void migrate_add_blocker(Error *reason)
-{
-    migration_blockers = g_slist_prepend(migration_blockers, reason);
-}
-
-void migrate_del_blocker(Error *reason)
-{
-    migration_blockers = g_slist_remove(migration_blockers, reason);
-}
-
-void qmp_migrate(const char *uri, bool has_blk, bool blk,
-                 bool has_inc, bool inc, bool has_detach, bool detach,
-                 Error **errp)
-{
-    Error *local_err = NULL;
-    MigrationState *s = migrate_get_current();
-    MigrationParams params;
-    const char *p;
-
-    params.blk = has_blk && blk;
-    params.shared = has_inc && inc;
-
-    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP ||
-        s->state == MIG_STATE_CANCELLING) {
-        error_set(errp, QERR_MIGRATION_ACTIVE);
-        return;
-    }
-
-    if (runstate_check(RUN_STATE_INMIGRATE)) {
-        error_setg(errp, "Guest is waiting for an incoming migration");
-        return;
-    }
-
-    if (qemu_savevm_state_blocked(errp)) {
-        return;
-    }
-
-    if (migration_blockers) {
-        *errp = error_copy(migration_blockers->data);
-        return;
-    }
-
-    s = migrate_init(&params);
-
-    if (strstart(uri, "tcp:", &p)) {
-        tcp_start_outgoing_migration(s, p, &local_err);
-#ifdef CONFIG_RDMA
-    } else if (strstart(uri, "rdma:", &p)) {
-        rdma_start_outgoing_migration(s, p, &local_err);
-#endif
-#if !defined(WIN32)
-    } else if (strstart(uri, "exec:", &p)) {
-        exec_start_outgoing_migration(s, p, &local_err);
-    } else if (strstart(uri, "unix:", &p)) {
-        unix_start_outgoing_migration(s, p, &local_err);
-    } else if (strstart(uri, "fd:", &p)) {
-        fd_start_outgoing_migration(s, p, &local_err);
-#endif
-    } else {
-        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol");
-        s->state = MIG_STATE_ERROR;
-        return;
-    }
-
-    if (local_err) {
-        migrate_fd_error(s);
-        error_propagate(errp, local_err);
-        return;
-    }
-}
-
-void qmp_migrate_cancel(Error **errp)
-{
-    migrate_fd_cancel(migrate_get_current());
-}
-
-void qmp_migrate_set_cache_size(int64_t value, Error **errp)
-{
-    MigrationState *s = migrate_get_current();
-    int64_t new_size;
-
-    /* Check for truncation */
-    if (value != (size_t)value) {
-        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                  "exceeding address space");
-        return;
-    }
-
-    /* Cache should not be larger than guest ram size */
-    if (value > ram_bytes_total()) {
-        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                  "exceeds guest ram size ");
-        return;
-    }
-
-    new_size = xbzrle_cache_resize(value);
-    if (new_size < 0) {
-        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                  "is smaller than page size");
-        return;
-    }
-
-    s->xbzrle_cache_size = new_size;
-}
-
-int64_t qmp_query_migrate_cache_size(Error **errp)
-{
-    return migrate_xbzrle_cache_size();
-}
-
-void qmp_migrate_set_speed(int64_t value, Error **errp)
-{
-    MigrationState *s;
-
-    if (value < 0) {
-        value = 0;
-    }
-    if (value > SIZE_MAX) {
-        value = SIZE_MAX;
-    }
-
-    s = migrate_get_current();
-    s->bandwidth_limit = value;
-    if (s->file) {
-        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
-    }
-}
-
-void qmp_migrate_set_downtime(double value, Error **errp)
-{
-    value *= 1e9;
-    value = MAX(0, MIN(UINT64_MAX, value));
-    max_downtime = (uint64_t)value;
-}
-
-bool migrate_rdma_pin_all(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
-}
-
-bool migrate_auto_converge(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
-}
-
-bool migrate_zero_blocks(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
-}
-
-int migrate_use_xbzrle(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
-}
-
-int64_t migrate_xbzrle_cache_size(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->xbzrle_cache_size;
-}
-
-/* migration thread support */
-
-static void *migration_thread(void *opaque)
-{
-    MigrationState *s = opaque;
-    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
-    int64_t initial_bytes = 0;
-    int64_t max_size = 0;
-    int64_t start_time = initial_time;
-    bool old_vm_running = false;
-
-    qemu_savevm_state_begin(s->file, &s->params);
-
-    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
-    migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);
-
-    while (s->state == MIG_STATE_ACTIVE) {
-        int64_t current_time;
-        uint64_t pending_size;
-
-        if (!qemu_file_rate_limit(s->file)) {
-            pending_size = qemu_savevm_state_pending(s->file, max_size);
-            trace_migrate_pending(pending_size, max_size);
-            if (pending_size && pending_size >= max_size) {
-                qemu_savevm_state_iterate(s->file);
-            } else {
-                int ret;
-
-                qemu_mutex_lock_iothread();
-                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
-                old_vm_running = runstate_is_running();
-
-                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
-                if (ret >= 0) {
-                    qemu_file_set_rate_limit(s->file, INT64_MAX);
-                    qemu_savevm_state_complete(s->file);
-                }
-                qemu_mutex_unlock_iothread();
-
-                if (ret < 0) {
-                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
-                    break;
-                }
-
-                if (!qemu_file_get_error(s->file)) {
-                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
-                    break;
-                }
-            }
-        }
-
-        if (qemu_file_get_error(s->file)) {
-            migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
-            break;
-        }
-        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-        if (current_time >= initial_time + BUFFER_DELAY) {
-            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
-            uint64_t time_spent = current_time - initial_time;
-            double bandwidth = (double)transferred_bytes / time_spent;
-            max_size = bandwidth * migrate_max_downtime() / 1000000;
-
-            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
-                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;
-
-            trace_migrate_transferred(transferred_bytes, time_spent,
-                                      bandwidth, max_size);
-            /* If we haven't sent anything, we don't want to recalculate the
-               expected downtime; 10000 bytes is a small enough threshold for
-               our purposes. */
-            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
-                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
-            }
-
-            qemu_file_reset_rate_limit(s->file);
-            initial_time = current_time;
-            initial_bytes = qemu_ftell(s->file);
-        }
-        if (qemu_file_rate_limit(s->file)) {
-            /* usleep expects microseconds */
-            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
-        }
-    }
-
-    qemu_mutex_lock_iothread();
-    if (s->state == MIG_STATE_COMPLETED) {
-        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
-        uint64_t transferred_bytes = qemu_ftell(s->file);
-        s->total_time = end_time - s->total_time;
-        s->downtime = end_time - start_time;
-        if (s->total_time) {
-            s->mbps = (((double) transferred_bytes * 8.0) /
-                       ((double) s->total_time)) / 1000;
-        }
-        runstate_set(RUN_STATE_POSTMIGRATE);
-    } else {
-        if (old_vm_running) {
-            vm_start();
-        }
-    }
-    qemu_bh_schedule(s->cleanup_bh);
-    qemu_mutex_unlock_iothread();
-
-    return NULL;
-}
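
To make the downtime arithmetic above concrete, a worked example with hypothetical numbers:

    /* Suppose 64 MiB were transferred during one 100 ms window:
     *   bandwidth = 67108864 bytes / 100 ms = 671088.64 bytes/ms
     * With the default max_downtime of 300000000 ns (300 ms):
     *   max_size  = 671088.64 * 300000000 / 1000000 = 201326592 bytes
     * The migration only completes once the pending state drops below
     * max_size, i.e. once it could be sent within the allowed downtime. */
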
-
-void migrate_fd_connect(MigrationState *s)
-{
-    s->state = MIG_STATE_SETUP;
-    trace_migrate_set_state(MIG_STATE_SETUP);
-
-    /* This is a best first approximation; convert max_downtime ns -> ms. */
-    s->expected_downtime = max_downtime / 1000000;
-    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
-
-    qemu_file_set_rate_limit(s->file,
-                             s->bandwidth_limit / XFER_LIMIT_RATIO);
-
-    /* Notify before starting migration thread */
-    notifier_list_notify(&migration_state_notifiers, s);
-
-    qemu_thread_create(&s->thread, "migration", migration_thread, s,
-                       QEMU_THREAD_JOINABLE);
-}
diff --git a/migration/Makefile.objs b/migration/Makefile.objs
new file mode 100644 (file)
index 0000000..63dbe93
--- /dev/null
@@ -0,0 +1,10 @@
+common-obj-y += migration.o migration-tcp.o
+common-obj-y += vmstate.o
+common-obj-y += qemu-file.o qemu-file-unix.o qemu-file-stdio.o
+common-obj-$(CONFIG_RDMA) += migration-rdma.o
+common-obj-y += xbzrle.o
+
+common-obj-$(CONFIG_POSIX) += migration-exec.o migration-unix.o migration-fd.o
+
+common-obj-y += block-migration.o
+
diff --git a/migration/block-migration.c b/migration/block-migration.c
new file mode 100644 (file)
index 0000000..74d9eb1
--- /dev/null
@@ -0,0 +1,892 @@
+/*
+ * QEMU live block migration
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ *  Liran Schour   <lirans@il.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu-common.h"
+#include "block/block.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "hw/hw.h"
+#include "qemu/queue.h"
+#include "qemu/timer.h"
+#include "migration/block.h"
+#include "migration/migration.h"
+#include "sysemu/blockdev.h"
+#include <assert.h>
+
+#define BLOCK_SIZE                       (1 << 20)
+#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
+
+#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
+#define BLK_MIG_FLAG_EOS                0x02
+#define BLK_MIG_FLAG_PROGRESS           0x04
+#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
+
+#define MAX_IS_ALLOCATED_SEARCH 65536
+
+//#define DEBUG_BLK_MIGRATION
+
+#ifdef DEBUG_BLK_MIGRATION
+#define DPRINTF(fmt, ...) \
+    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+typedef struct BlkMigDevState {
+    /* Written during setup phase.  Can be read without a lock.  */
+    BlockDriverState *bs;
+    int shared_base;
+    int64_t total_sectors;
+    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
+
+    /* Only used by migration thread.  Does not need a lock.  */
+    int bulk_completed;
+    int64_t cur_sector;
+    int64_t cur_dirty;
+
+    /* Protected by block migration lock.  */
+    unsigned long *aio_bitmap;
+    int64_t completed_sectors;
+    BdrvDirtyBitmap *dirty_bitmap;
+    Error *blocker;
+} BlkMigDevState;
+
+typedef struct BlkMigBlock {
+    /* Only used by migration thread.  */
+    uint8_t *buf;
+    BlkMigDevState *bmds;
+    int64_t sector;
+    int nr_sectors;
+    struct iovec iov;
+    QEMUIOVector qiov;
+    BlockAIOCB *aiocb;
+
+    /* Protected by block migration lock.  */
+    int ret;
+    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
+} BlkMigBlock;
+
+typedef struct BlkMigState {
+    /* Written during setup phase.  Can be read without a lock.  */
+    int blk_enable;
+    int shared_base;
+    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
+    int64_t total_sector_sum;
+    bool zero_blocks;
+
+    /* Protected by lock.  */
+    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
+    int submitted;
+    int read_done;
+
+    /* Only used by migration thread.  Does not need a lock.  */
+    int transferred;
+    int prev_progress;
+    int bulk_completed;
+
+    /* Lock must be taken _inside_ the iothread lock.  */
+    QemuMutex lock;
+} BlkMigState;
+
+static BlkMigState block_mig_state;
+
+static void blk_mig_lock(void)
+{
+    qemu_mutex_lock(&block_mig_state.lock);
+}
+
+static void blk_mig_unlock(void)
+{
+    qemu_mutex_unlock(&block_mig_state.lock);
+}
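
The BlkMigState comment below pins a strict lock order: the block-migration
lock is only ever taken inside the iothread lock. A self-contained
illustration of that rule, with plain pthread mutexes standing in for
QemuMutex (a sketch, not QEMU code):

    #include <pthread.h>

    static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t blk_mig_mutex = PTHREAD_MUTEX_INITIALIZER; /* inner */

    static void touch_doubly_protected_state(void)
    {
        /* Always outer-then-inner; taking them in the reverse order
         * against another thread doing this can deadlock. */
        pthread_mutex_lock(&iothread_lock);
        pthread_mutex_lock(&blk_mig_mutex);
        /* ... mutate fields guarded by both locks ... */
        pthread_mutex_unlock(&blk_mig_mutex);
        pthread_mutex_unlock(&iothread_lock);
    }
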
+
+/* Must run outside of the iothread lock during the bulk phase,
+ * or the VM will stall.
+ */
+
+static void blk_send(QEMUFile *f, BlkMigBlock * blk)
+{
+    int len;
+    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
+
+    if (block_mig_state.zero_blocks &&
+        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
+    }
+
+    /* sector number and flags */
+    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
+                     | flags);
+
+    /* device name */
+    len = strlen(bdrv_get_device_name(blk->bmds->bs));
+    qemu_put_byte(f, len);
+    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);
+
+    /* If a block is zero we need to flush here since the network
+     * bandwidth is now a lot higher than the storage device bandwidth;
+     * if we queued zero blocks we would slow down the migration. */
+    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+        qemu_fflush(f);
+        return;
+    }
+
+    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
+}
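
blk_send() fixes the on-wire record layout that block_load() further down
parses back. Since sectors are 512 bytes (BDRV_SECTOR_BITS is 9), the flag
bits fit below the shifted sector number in the header word. A decoding
sketch of that header (stream reads elided):

    #include <stdint.h>

    #define BDRV_SECTOR_BITS 9   /* 512-byte sectors */

    /* Record layout written above: be64 header, then a length-prefixed
     * device name, then a full BLOCK_SIZE payload -- unless the
     * ZERO_BLOCK flag is set, in which case the payload is omitted. */
    static void decode_header(uint64_t word, uint64_t *sector,
                              unsigned *flags)
    {
        *flags  = (unsigned)(word & ((1u << BDRV_SECTOR_BITS) - 1));
        *sector = word >> BDRV_SECTOR_BITS;
    }
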
+
+int blk_mig_active(void)
+{
+    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
+}
+
+uint64_t blk_mig_bytes_transferred(void)
+{
+    BlkMigDevState *bmds;
+    uint64_t sum = 0;
+
+    blk_mig_lock();
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        sum += bmds->completed_sectors;
+    }
+    blk_mig_unlock();
+    return sum << BDRV_SECTOR_BITS;
+}
+
+uint64_t blk_mig_bytes_remaining(void)
+{
+    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
+}
+
+uint64_t blk_mig_bytes_total(void)
+{
+    BlkMigDevState *bmds;
+    uint64_t sum = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        sum += bmds->total_sectors;
+    }
+    return sum << BDRV_SECTOR_BITS;
+}
+
+
+/* Called with migration lock held.  */
+
+static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
+{
+    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
+
+    if (sector < bdrv_nb_sectors(bmds->bs)) {
+        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
+            (1UL << (chunk % (sizeof(unsigned long) * 8))));
+    } else {
+        return 0;
+    }
+}
+
+/* Called with migration lock held.  */
+
+static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
+                             int nb_sectors, int set)
+{
+    int64_t start, end;
+    unsigned long val, idx, bit;
+
+    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
+    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
+
+    for (; start <= end; start++) {
+        idx = start / (sizeof(unsigned long) * 8);
+        bit = start % (sizeof(unsigned long) * 8);
+        val = bmds->aio_bitmap[idx];
+        if (set) {
+            val |= 1UL << bit;
+        } else {
+            val &= ~(1UL << bit);
+        }
+        bmds->aio_bitmap[idx] = val;
+    }
+}
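
Both helpers above do the same two-level index math: a sector first maps to
its 1 MB chunk, and the chunk then maps to a word/bit pair in aio_bitmap. In
isolation:

    #include <stdint.h>

    #define BLOCK_SIZE                   (1 << 20)
    #define BDRV_SECTOR_BITS             9
    #define BDRV_SECTORS_PER_DIRTY_CHUNK (BLOCK_SIZE >> BDRV_SECTOR_BITS) /* 2048 */

    static void sector_to_bitpos(int64_t sector,
                                 unsigned long *idx, unsigned long *bit)
    {
        int64_t chunk = sector / BDRV_SECTORS_PER_DIRTY_CHUNK;
        *idx = chunk / (sizeof(unsigned long) * 8); /* which word */
        *bit = chunk % (sizeof(unsigned long) * 8); /* which bit  */
    }

For example, sector 10000000 lands in chunk 4882, which on a 64-bit host is
word 76, bit 18.
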
+
+static void alloc_aio_bitmap(BlkMigDevState *bmds)
+{
+    BlockDriverState *bs = bmds->bs;
+    int64_t bitmap_size;
+
+    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
+    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
+
+    bmds->aio_bitmap = g_malloc0(bitmap_size);
+}
+
+/* Never hold migration lock when yielding to the main loop!  */
+
+static void blk_mig_read_cb(void *opaque, int ret)
+{
+    BlkMigBlock *blk = opaque;
+
+    blk_mig_lock();
+    blk->ret = ret;
+
+    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
+    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);
+
+    block_mig_state.submitted--;
+    block_mig_state.read_done++;
+    assert(block_mig_state.submitted >= 0);
+    blk_mig_unlock();
+}
+
+/* Called with no lock taken.  */
+
+static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
+{
+    int64_t total_sectors = bmds->total_sectors;
+    int64_t cur_sector = bmds->cur_sector;
+    BlockDriverState *bs = bmds->bs;
+    BlkMigBlock *blk;
+    int nr_sectors;
+
+    if (bmds->shared_base) {
+        qemu_mutex_lock_iothread();
+        while (cur_sector < total_sectors &&
+               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
+                                  &nr_sectors)) {
+            cur_sector += nr_sectors;
+        }
+        qemu_mutex_unlock_iothread();
+    }
+
+    if (cur_sector >= total_sectors) {
+        bmds->cur_sector = bmds->completed_sectors = total_sectors;
+        return 1;
+    }
+
+    bmds->completed_sectors = cur_sector;
+
+    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);
+
+    /* we are going to transfer a full block even if it is not allocated */
+    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
+
+    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
+        nr_sectors = total_sectors - cur_sector;
+    }
+
+    blk = g_new(BlkMigBlock, 1);
+    blk->buf = g_malloc(BLOCK_SIZE);
+    blk->bmds = bmds;
+    blk->sector = cur_sector;
+    blk->nr_sectors = nr_sectors;
+
+    blk->iov.iov_base = blk->buf;
+    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+
+    blk_mig_lock();
+    block_mig_state.submitted++;
+    blk_mig_unlock();
+
+    qemu_mutex_lock_iothread();
+    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
+                                nr_sectors, blk_mig_read_cb, blk);
+
+    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
+    qemu_mutex_unlock_iothread();
+
+    bmds->cur_sector = cur_sector + nr_sectors;
+    return (bmds->cur_sector >= total_sectors);
+}
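
The `cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1)` line above
relies on the chunk size being a power of two: clearing the low bits rounds
down to the chunk boundary, so every bulk read starts 1 MB-aligned.
Equivalent, in isolation:

    #include <stdint.h>

    #define CHUNK_SECTORS 2048 /* BDRV_SECTORS_PER_DIRTY_CHUNK: 1 MB / 512 B */

    static int64_t chunk_align_down(int64_t sector)
    {
        return sector & ~((int64_t)CHUNK_SECTORS - 1);
    }
    /* chunk_align_down(5000) == 4096; chunk_align_down(2048) == 2048 */
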
+
+/* Called with iothread lock taken.  */
+
+static int set_dirty_tracking(void)
+{
+    BlkMigDevState *bmds;
+    int ret;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
+                                                      NULL);
+        if (!bmds->dirty_bitmap) {
+            ret = -errno;
+            goto fail;
+        }
+    }
+    return 0;
+
+fail:
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        if (bmds->dirty_bitmap) {
+            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
+        }
+    }
+    return ret;
+}
+
+static void unset_dirty_tracking(void)
+{
+    BlkMigDevState *bmds;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
+    }
+}
+
+static void init_blk_migration(QEMUFile *f)
+{
+    BlockDriverState *bs;
+    BlkMigDevState *bmds;
+    int64_t sectors;
+
+    block_mig_state.submitted = 0;
+    block_mig_state.read_done = 0;
+    block_mig_state.transferred = 0;
+    block_mig_state.total_sector_sum = 0;
+    block_mig_state.prev_progress = -1;
+    block_mig_state.bulk_completed = 0;
+    block_mig_state.zero_blocks = migrate_zero_blocks();
+
+    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
+        if (bdrv_is_read_only(bs)) {
+            continue;
+        }
+
+        sectors = bdrv_nb_sectors(bs);
+        if (sectors <= 0) {
+            return;
+        }
+
+        bmds = g_new0(BlkMigDevState, 1);
+        bmds->bs = bs;
+        bmds->bulk_completed = 0;
+        bmds->total_sectors = sectors;
+        bmds->completed_sectors = 0;
+        bmds->shared_base = block_mig_state.shared_base;
+        alloc_aio_bitmap(bmds);
+        error_setg(&bmds->blocker, "block device is in use by migration");
+        bdrv_op_block_all(bs, bmds->blocker);
+        bdrv_ref(bs);
+
+        block_mig_state.total_sector_sum += sectors;
+
+        if (bmds->shared_base) {
+            DPRINTF("Start migration for %s with shared base image\n",
+                    bdrv_get_device_name(bs));
+        } else {
+            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
+        }
+
+        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
+    }
+}
+
+/* Called with no lock taken.  */
+
+static int blk_mig_save_bulked_block(QEMUFile *f)
+{
+    int64_t completed_sector_sum = 0;
+    BlkMigDevState *bmds;
+    int progress;
+    int ret = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        if (bmds->bulk_completed == 0) {
+            if (mig_save_device_bulk(f, bmds) == 1) {
+                /* completed bulk section for this device */
+                bmds->bulk_completed = 1;
+            }
+            completed_sector_sum += bmds->completed_sectors;
+            ret = 1;
+            break;
+        } else {
+            completed_sector_sum += bmds->completed_sectors;
+        }
+    }
+
+    if (block_mig_state.total_sector_sum != 0) {
+        progress = completed_sector_sum * 100 /
+                   block_mig_state.total_sector_sum;
+    } else {
+        progress = 100;
+    }
+    if (progress != block_mig_state.prev_progress) {
+        block_mig_state.prev_progress = progress;
+        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
+                         | BLK_MIG_FLAG_PROGRESS);
+        DPRINTF("Completed %d %%\r", progress);
+    }
+
+    return ret;
+}
+
+static void blk_mig_reset_dirty_cursor(void)
+{
+    BlkMigDevState *bmds;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        bmds->cur_dirty = 0;
+    }
+}
+
+/* Called with iothread lock taken.  */
+
+static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
+                                 int is_async)
+{
+    BlkMigBlock *blk;
+    int64_t total_sectors = bmds->total_sectors;
+    int64_t sector;
+    int nr_sectors;
+    int ret = -EIO;
+
+    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
+        blk_mig_lock();
+        if (bmds_aio_inflight(bmds, sector)) {
+            blk_mig_unlock();
+            bdrv_drain_all();
+        } else {
+            blk_mig_unlock();
+        }
+        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {
+
+            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
+                nr_sectors = total_sectors - sector;
+            } else {
+                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
+            }
+            blk = g_new(BlkMigBlock, 1);
+            blk->buf = g_malloc(BLOCK_SIZE);
+            blk->bmds = bmds;
+            blk->sector = sector;
+            blk->nr_sectors = nr_sectors;
+
+            if (is_async) {
+                blk->iov.iov_base = blk->buf;
+                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+
+                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
+                                            nr_sectors, blk_mig_read_cb, blk);
+
+                blk_mig_lock();
+                block_mig_state.submitted++;
+                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
+                blk_mig_unlock();
+            } else {
+                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
+                if (ret < 0) {
+                    goto error;
+                }
+                blk_send(f, blk);
+
+                g_free(blk->buf);
+                g_free(blk);
+            }
+
+            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
+            break;
+        }
+        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
+        bmds->cur_dirty = sector;
+    }
+
+    return (bmds->cur_dirty >= bmds->total_sectors);
+
+error:
+    DPRINTF("Error reading sector %" PRId64 "\n", sector);
+    g_free(blk->buf);
+    g_free(blk);
+    return ret;
+}
+
+/* Called with iothread lock taken.
+ *
+ * return value:
+ * 0: too much data for max_downtime
+ * 1: little enough data for max_downtime
+ */
+static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
+{
+    BlkMigDevState *bmds;
+    int ret = 1;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        ret = mig_save_device_dirty(f, bmds, is_async);
+        if (ret <= 0) {
+            break;
+        }
+    }
+
+    return ret;
+}
+
+/* Called with no locks taken.  */
+
+static int flush_blks(QEMUFile *f)
+{
+    BlkMigBlock *blk;
+    int ret = 0;
+
+    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
+            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
+            block_mig_state.transferred);
+
+    blk_mig_lock();
+    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
+        if (qemu_file_rate_limit(f)) {
+            break;
+        }
+        if (blk->ret < 0) {
+            ret = blk->ret;
+            break;
+        }
+
+        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
+        blk_mig_unlock();
+        blk_send(f, blk);
+        blk_mig_lock();
+
+        g_free(blk->buf);
+        g_free(blk);
+
+        block_mig_state.read_done--;
+        block_mig_state.transferred++;
+        assert(block_mig_state.read_done >= 0);
+    }
+    blk_mig_unlock();
+
+    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
+            block_mig_state.submitted, block_mig_state.read_done,
+            block_mig_state.transferred);
+    return ret;
+}
+
+/* Called with iothread lock taken.  */
+
+static int64_t get_remaining_dirty(void)
+{
+    BlkMigDevState *bmds;
+    int64_t dirty = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
+    }
+
+    return dirty << BDRV_SECTOR_BITS;
+}
+
+/* Called with iothread lock taken.  */
+
+static void blk_mig_cleanup(void)
+{
+    BlkMigDevState *bmds;
+    BlkMigBlock *blk;
+
+    bdrv_drain_all();
+
+    unset_dirty_tracking();
+
+    blk_mig_lock();
+    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
+        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
+        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
+        error_free(bmds->blocker);
+        bdrv_unref(bmds->bs);
+        g_free(bmds->aio_bitmap);
+        g_free(bmds);
+    }
+
+    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
+        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
+        g_free(blk->buf);
+        g_free(blk);
+    }
+    blk_mig_unlock();
+}
+
+static void block_migration_cancel(void *opaque)
+{
+    blk_mig_cleanup();
+}
+
+static int block_save_setup(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live setup submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    qemu_mutex_lock_iothread();
+    init_blk_migration(f);
+
+    /* start tracking dirty blocks */
+    ret = set_dirty_tracking();
+
+    if (ret) {
+        qemu_mutex_unlock_iothread();
+        return ret;
+    }
+
+    qemu_mutex_unlock_iothread();
+
+    ret = flush_blks(f);
+    blk_mig_reset_dirty_cursor();
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    return ret;
+}
+
+static int block_save_iterate(QEMUFile *f, void *opaque)
+{
+    int ret;
+    int64_t last_ftell = qemu_ftell(f);
+    int64_t delta_ftell;
+
+    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    ret = flush_blks(f);
+    if (ret) {
+        return ret;
+    }
+
+    blk_mig_reset_dirty_cursor();
+
+    /* control the rate of transfer */
+    blk_mig_lock();
+    while ((block_mig_state.submitted +
+            block_mig_state.read_done) * BLOCK_SIZE <
+           qemu_file_get_rate_limit(f)) {
+        blk_mig_unlock();
+        if (block_mig_state.bulk_completed == 0) {
+            /* first finish the bulk phase */
+            if (blk_mig_save_bulked_block(f) == 0) {
+                /* finished saving bulk on all devices */
+                block_mig_state.bulk_completed = 1;
+            }
+            ret = 0;
+        } else {
+            /* Always called with iothread lock taken for
+             * simplicity, block_save_complete also calls it.
+             */
+            qemu_mutex_lock_iothread();
+            ret = blk_mig_save_dirty_block(f, 1);
+            qemu_mutex_unlock_iothread();
+        }
+        if (ret < 0) {
+            return ret;
+        }
+        blk_mig_lock();
+        if (ret != 0) {
+            /* no more dirty blocks */
+            break;
+        }
+    }
+    blk_mig_unlock();
+
+    ret = flush_blks(f);
+    if (ret) {
+        return ret;
+    }
+
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+    delta_ftell = qemu_ftell(f) - last_ftell;
+    if (delta_ftell > 0) {
+        return 1;
+    } else if (delta_ftell < 0) {
+        return -1;
+    } else {
+        return 0;
+    }
+}
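
The delta_ftell dance above implements the save_live_iterate return
convention: positive for "made progress, call again", zero for "nothing to
send right now", negative for an error. It reduces to the sign of the delta:

    #include <stdint.h>

    /* Sign of delta_ftell, matching the three-way if/else above. */
    static int iterate_result(int64_t delta_ftell)
    {
        return (delta_ftell > 0) - (delta_ftell < 0);
    }
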
+
+/* Called with iothread lock taken.  */
+
+static int block_save_complete(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live complete submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    ret = flush_blks(f);
+    if (ret) {
+        return ret;
+    }
+
+    blk_mig_reset_dirty_cursor();
+
+    /* we know for sure that save bulk is completed and
+       all async read completed */
+    blk_mig_lock();
+    assert(block_mig_state.submitted == 0);
+    blk_mig_unlock();
+
+    do {
+        ret = blk_mig_save_dirty_block(f, 0);
+        if (ret < 0) {
+            return ret;
+        }
+    } while (ret == 0);
+
+    /* report completion */
+    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+
+    DPRINTF("Block migration completed\n");
+
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    blk_mig_cleanup();
+    return 0;
+}
+
+static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
+{
+    /* Estimate pending number of bytes to send */
+    uint64_t pending;
+
+    qemu_mutex_lock_iothread();
+    blk_mig_lock();
+    pending = get_remaining_dirty() +
+                       block_mig_state.submitted * BLOCK_SIZE +
+                       block_mig_state.read_done * BLOCK_SIZE;
+
+    /* Report at least one block pending during bulk phase */
+    if (pending == 0 && !block_mig_state.bulk_completed) {
+        pending = BLOCK_SIZE;
+    }
+    blk_mig_unlock();
+    qemu_mutex_unlock_iothread();
+
+    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
+    return pending;
+}
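
The estimate above charges one full BLOCK_SIZE for every in-flight read
(submitted) and every completed-but-unsent block (read_done), on top of the
dirty-bitmap total; the final short block of a device is thus over-counted,
which is harmless for a pending estimate. Condensed:

    #include <stdint.h>

    #define BLOCK_SIZE (1 << 20)

    static uint64_t pending_estimate(uint64_t dirty_bytes, int submitted,
                                     int read_done, int bulk_completed)
    {
        uint64_t pending = dirty_bytes
                         + (uint64_t)submitted * BLOCK_SIZE
                         + (uint64_t)read_done * BLOCK_SIZE;
        /* never report 0 while the bulk phase still has work queued */
        if (pending == 0 && !bulk_completed) {
            pending = BLOCK_SIZE;
        }
        return pending;
    }
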
+
+static int block_load(QEMUFile *f, void *opaque, int version_id)
+{
+    static int banner_printed;
+    int len, flags;
+    char device_name[256];
+    int64_t addr;
+    BlockDriverState *bs, *bs_prev = NULL;
+    uint8_t *buf;
+    int64_t total_sectors = 0;
+    int nr_sectors;
+    int ret;
+
+    do {
+        addr = qemu_get_be64(f);
+
+        flags = addr & ~BDRV_SECTOR_MASK;
+        addr >>= BDRV_SECTOR_BITS;
+
+        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
+            /* get device name */
+            len = qemu_get_byte(f);
+            qemu_get_buffer(f, (uint8_t *)device_name, len);
+            device_name[len] = '\0';
+
+            bs = bdrv_find(device_name);
+            if (!bs) {
+                fprintf(stderr, "Error unknown block device %s\n",
+                        device_name);
+                return -EINVAL;
+            }
+
+            if (bs != bs_prev) {
+                bs_prev = bs;
+                total_sectors = bdrv_nb_sectors(bs);
+                if (total_sectors <= 0) {
+                    error_report("Error getting length of block device %s",
+                                 device_name);
+                    return -EINVAL;
+                }
+            }
+
+            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
+                nr_sectors = total_sectors - addr;
+            } else {
+                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
+            }
+
+            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
+                                        BDRV_REQ_MAY_UNMAP);
+            } else {
+                buf = g_malloc(BLOCK_SIZE);
+                qemu_get_buffer(f, buf, BLOCK_SIZE);
+                ret = bdrv_write(bs, addr, buf, nr_sectors);
+                g_free(buf);
+            }
+
+            if (ret < 0) {
+                return ret;
+            }
+        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
+            if (!banner_printed) {
+                printf("Receiving block device images\n");
+                banner_printed = 1;
+            }
+            printf("Completed %d %%%c", (int)addr,
+                   (addr == 100) ? '\n' : '\r');
+            fflush(stdout);
+        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
+            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
+            return -EINVAL;
+        }
+        ret = qemu_file_get_error(f);
+        if (ret != 0) {
+            return ret;
+        }
+    } while (!(flags & BLK_MIG_FLAG_EOS));
+
+    return 0;
+}
+
+static void block_set_params(const MigrationParams *params, void *opaque)
+{
+    block_mig_state.blk_enable = params->blk;
+    block_mig_state.shared_base = params->shared;
+
+    /* shared base means that blk_enable = 1 */
+    block_mig_state.blk_enable |= params->shared;
+}
+
+static bool block_is_active(void *opaque)
+{
+    return block_mig_state.blk_enable == 1;
+}
+
+static SaveVMHandlers savevm_block_handlers = {
+    .set_params = block_set_params,
+    .save_live_setup = block_save_setup,
+    .save_live_iterate = block_save_iterate,
+    .save_live_complete = block_save_complete,
+    .save_live_pending = block_save_pending,
+    .load_state = block_load,
+    .cancel = block_migration_cancel,
+    .is_active = block_is_active,
+};
+
+void blk_mig_init(void)
+{
+    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
+    QSIMPLEQ_INIT(&block_mig_state.blk_list);
+    qemu_mutex_init(&block_mig_state.lock);
+
+    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
+                         &block_mig_state);
+}
diff --git a/migration/migration-exec.c b/migration/migration-exec.c
new file mode 100644 (file)
index 0000000..4790247
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * QEMU live migration
+ *
+ * Copyright IBM, Corp. 2008
+ * Copyright Dell MessageOne 2008
+ *
+ * Authors:
+ *  Anthony Liguori   <aliguori@us.ibm.com>
+ *  Charles Duffy     <charles_duffy@messageone.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu-common.h"
+#include "qemu/sockets.h"
+#include "qemu/main-loop.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "block/block.h"
+#include <sys/types.h>
+#include <sys/wait.h>
+
+//#define DEBUG_MIGRATION_EXEC
+
+#ifdef DEBUG_MIGRATION_EXEC
+#define DPRINTF(fmt, ...) \
+    do { printf("migration-exec: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp)
+{
+    s->file = qemu_popen_cmd(command, "w");
+    if (s->file == NULL) {
+        error_setg_errno(errp, errno, "failed to popen the migration target");
+        return;
+    }
+
+    migrate_fd_connect(s);
+}
+
+static void exec_accept_incoming_migration(void *opaque)
+{
+    QEMUFile *f = opaque;
+
+    qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL);
+    process_incoming_migration(f);
+}
+
+void exec_start_incoming_migration(const char *command, Error **errp)
+{
+    QEMUFile *f;
+
+    DPRINTF("Attempting to start an incoming migration\n");
+    f = qemu_popen_cmd(command, "r");
+    if (f == NULL) {
+        error_setg_errno(errp, errno, "failed to popen the migration source");
+        return;
+    }
+
+    qemu_set_fd_handler2(qemu_get_fd(f), NULL,
+                        exec_accept_incoming_migration, NULL, f);
+}
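
These two entry points pair up with the user-visible "exec:" transport: on the
source, a monitor command such as migrate "exec:gzip -c > /tmp/vm.gz" pipes
the stream into the popen'd command's stdin, while a destination started with
-incoming "exec:gzip -cd /tmp/vm.gz" reads the command's stdout (illustrative
commands; any pipeline that moves the bytes unchanged will do).
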
diff --git a/migration/migration-fd.c b/migration/migration-fd.c
new file mode 100644 (file)
index 0000000..d2e523a
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * QEMU live migration via generic fd
+ *
+ * Copyright Red Hat, Inc. 2009
+ *
+ * Authors:
+ *  Chris Lalancette <clalance@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu-common.h"
+#include "qemu/main-loop.h"
+#include "qemu/sockets.h"
+#include "migration/migration.h"
+#include "monitor/monitor.h"
+#include "migration/qemu-file.h"
+#include "block/block.h"
+
+//#define DEBUG_MIGRATION_FD
+
+#ifdef DEBUG_MIGRATION_FD
+#define DPRINTF(fmt, ...) \
+    do { printf("migration-fd: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp)
+{
+    int fd = monitor_get_fd(cur_mon, fdname, errp);
+    if (fd == -1) {
+        return;
+    }
+    s->file = qemu_fdopen(fd, "wb");
+
+    migrate_fd_connect(s);
+}
+
+static void fd_accept_incoming_migration(void *opaque)
+{
+    QEMUFile *f = opaque;
+
+    qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL);
+    process_incoming_migration(f);
+}
+
+void fd_start_incoming_migration(const char *infd, Error **errp)
+{
+    int fd;
+    QEMUFile *f;
+
+    DPRINTF("Attempting to start an incoming migration via fd\n");
+
+    fd = strtol(infd, NULL, 0);
+    f = qemu_fdopen(fd, "rb");
+    if (f == NULL) {
+        error_setg_errno(errp, errno, "failed to open the source descriptor");
+        return;
+    }
+
+    qemu_set_fd_handler2(fd, NULL, fd_accept_incoming_migration, NULL, f);
+}
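
The fd transport is asymmetric: the outgoing side looks the name up with
monitor_get_fd(), so the descriptor must first be handed to QEMU (e.g. via the
getfd monitor command and then migrate "fd:name"), whereas the incoming side
simply parses the string as a numeric descriptor with strtol(), as in
-incoming "fd:3" (example names and numbers are illustrative).
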
diff --git a/migration/migration-rdma.c b/migration/migration-rdma.c
new file mode 100644 (file)
index 0000000..b32dbdf
--- /dev/null
@@ -0,0 +1,3438 @@
+/*
+ * RDMA protocol and interfaces
+ *
+ * Copyright IBM, Corp. 2010-2013
+ *
+ * Authors:
+ *  Michael R. Hines <mrhines@us.ibm.com>
+ *  Jiuxing Liu <jl@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later.  See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu-common.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "exec/cpu-common.h"
+#include "qemu/main-loop.h"
+#include "qemu/sockets.h"
+#include "qemu/bitmap.h"
+#include "block/coroutine.h"
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <arpa/inet.h>
+#include <string.h>
+#include <rdma/rdma_cma.h>
+
+//#define DEBUG_RDMA
+//#define DEBUG_RDMA_VERBOSE
+//#define DEBUG_RDMA_REALLY_VERBOSE
+
+#ifdef DEBUG_RDMA
+#define DPRINTF(fmt, ...) \
+    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+#ifdef DEBUG_RDMA_VERBOSE
+#define DDPRINTF(fmt, ...) \
+    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DDPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+#ifdef DEBUG_RDMA_REALLY_VERBOSE
+#define DDDPRINTF(fmt, ...) \
+    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DDDPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+/*
+ * Print an error on both the Monitor and the Log file.
+ */
+#define ERROR(errp, fmt, ...) \
+    do { \
+        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
+        if (errp && (*(errp) == NULL)) { \
+            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
+        } \
+    } while (0)
+
+#define RDMA_RESOLVE_TIMEOUT_MS 10000
+
+/* Do not merge data if larger than this. */
+#define RDMA_MERGE_MAX (2 * 1024 * 1024)
+#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)
+
+#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */
+
+/*
+ * This is only for non-live state being migrated.
+ * Instead of RDMA_WRITE messages, we use RDMA_SEND
+ * messages for that state, which requires a different
+ * delivery design than main memory.
+ */
+#define RDMA_SEND_INCREMENT 32768
+
+/*
+ * Maximum size infiniband SEND message
+ */
+#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
+#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096
+
+#define RDMA_CONTROL_VERSION_CURRENT 1
+/*
+ * Capabilities for negotiation.
+ */
+#define RDMA_CAPABILITY_PIN_ALL 0x01
+
+/*
+ * Add the other flags above to this list of known capabilities
+ * as they are introduced.
+ */
+static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
+
+#define CHECK_ERROR_STATE() \
+    do { \
+        if (rdma->error_state) { \
+            if (!rdma->error_reported) { \
+                fprintf(stderr, "RDMA is in an error state waiting migration" \
+                                " to abort!\n"); \
+                rdma->error_reported = 1; \
+            } \
+            return rdma->error_state; \
+        } \
+    } while (0);
+
+/*
+ * A work request ID is 64-bits and we split up these bits
+ * into 3 parts:
+ *
+ * bits 0-15 : type of control message, 2^16
+ * bits 16-29: ram block index, 2^14
+ * bits 30-63: ram block chunk number, 2^34
+ *
+ * The last two bit ranges are only used for RDMA writes,
+ * in order to track their completion and potentially
+ * also track unregistration status of the message.
+ */
+#define RDMA_WRID_TYPE_SHIFT  0UL
+#define RDMA_WRID_BLOCK_SHIFT 16UL
+#define RDMA_WRID_CHUNK_SHIFT 30UL
+
+#define RDMA_WRID_TYPE_MASK \
+    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)
+
+#define RDMA_WRID_BLOCK_MASK \
+    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))
+
+#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
+
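
A pack/unpack sketch for the layout documented above: 16 bits of type, 14 bits
of block index, 34 bits of chunk number (names shortened from the RDMA_WRID_*
macros for brevity):

    #include <stdint.h>

    #define WRID_TYPE_SHIFT  0
    #define WRID_BLOCK_SHIFT 16
    #define WRID_CHUNK_SHIFT 30

    static uint64_t wrid_make(uint64_t type, uint64_t block, uint64_t chunk)
    {
        return (chunk << WRID_CHUNK_SHIFT) |
               (block << WRID_BLOCK_SHIFT) |
               (type  << WRID_TYPE_SHIFT);
    }

    static uint64_t wrid_type(uint64_t w)  { return w & 0xffffULL; }
    static uint64_t wrid_block(uint64_t w) { return (w >> 16) & 0x3fffULL; }
    static uint64_t wrid_chunk(uint64_t w) { return w >> 30; }
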
+/*
+ * RDMA migration protocol:
+ * 1. RDMA Writes (data messages, i.e. RAM)
+ * 2. IB Send/Recv (control channel messages)
+ */
+enum {
+    RDMA_WRID_NONE = 0,
+    RDMA_WRID_RDMA_WRITE = 1,
+    RDMA_WRID_SEND_CONTROL = 2000,
+    RDMA_WRID_RECV_CONTROL = 4000,
+};
+
+const char *wrid_desc[] = {
+    [RDMA_WRID_NONE] = "NONE",
+    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
+    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
+    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
+};
+
+/*
+ * Work request IDs for IB SEND messages only (not RDMA writes).
+ * This is used by the migration protocol to transmit
+ * control messages (such as device state and registration commands)
+ *
+ * We could use more WRs, but we have enough for now.
+ */
+enum {
+    RDMA_WRID_READY = 0,
+    RDMA_WRID_DATA,
+    RDMA_WRID_CONTROL,
+    RDMA_WRID_MAX,
+};
+
+/*
+ * SEND/RECV IB Control Messages.
+ */
+enum {
+    RDMA_CONTROL_NONE = 0,
+    RDMA_CONTROL_ERROR,
+    RDMA_CONTROL_READY,               /* ready to receive */
+    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
+    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
+    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
+    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
+    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
+    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
+    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
+    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
+    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
+};
+
+const char *control_desc[] = {
+    [RDMA_CONTROL_NONE] = "NONE",
+    [RDMA_CONTROL_ERROR] = "ERROR",
+    [RDMA_CONTROL_READY] = "READY",
+    [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
+    [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
+    [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
+    [RDMA_CONTROL_COMPRESS] = "COMPRESS",
+    [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
+    [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
+    [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
+    [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
+    [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
+};
+
+/*
+ * Memory and MR structures used to represent an IB Send/Recv work request.
+ * This is *not* used for RDMA writes, only IB Send/Recv.
+ */
+typedef struct {
+    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
+    struct   ibv_mr *control_mr;               /* registration metadata */
+    size_t   control_len;                      /* length of the message */
+    uint8_t *control_curr;                     /* start of unconsumed bytes */
+} RDMAWorkRequestData;
+
+/*
+ * Negotiate RDMA capabilities during connection-setup time.
+ */
+typedef struct {
+    uint32_t version;
+    uint32_t flags;
+} RDMACapabilities;
+
+static void caps_to_network(RDMACapabilities *cap)
+{
+    cap->version = htonl(cap->version);
+    cap->flags = htonl(cap->flags);
+}
+
+static void network_to_caps(RDMACapabilities *cap)
+{
+    cap->version = ntohl(cap->version);
+    cap->flags = ntohl(cap->flags);
+}
+
+/*
+ * Representation of a RAMBlock from an RDMA perspective.
+ * This is not transmitted, only local.
+ * This and subsequent structures cannot be linked lists
+ * because we're using a single IB message to transmit
+ * the information. It's small anyway, so a list is overkill.
+ */
+typedef struct RDMALocalBlock {
+    uint8_t  *local_host_addr; /* local virtual address */
+    uint64_t remote_host_addr; /* remote virtual address */
+    uint64_t offset;
+    uint64_t length;
+    struct   ibv_mr **pmr;     /* MRs for chunk-level registration */
+    struct   ibv_mr *mr;       /* MR for non-chunk-level registration */
+    uint32_t *remote_keys;     /* rkeys for chunk-level registration */
+    uint32_t remote_rkey;      /* rkeys for non-chunk-level registration */
+    int      index;            /* which block are we */
+    bool     is_ram_block;
+    int      nb_chunks;
+    unsigned long *transit_bitmap;
+    unsigned long *unregister_bitmap;
+} RDMALocalBlock;
+
+/*
+ * Also represents a RAMBlock, but only on the dest.
+ * This gets transmitted by the dest during connection-time
+ * to the source VM and then is used to populate the
+ * corresponding RDMALocalBlock with
+ * the information needed to perform the actual RDMA.
+ */
+typedef struct QEMU_PACKED RDMARemoteBlock {
+    uint64_t remote_host_addr;
+    uint64_t offset;
+    uint64_t length;
+    uint32_t remote_rkey;
+    uint32_t padding;
+} RDMARemoteBlock;
+
+static uint64_t htonll(uint64_t v)
+{
+    union { uint32_t lv[2]; uint64_t llv; } u;
+    u.lv[0] = htonl(v >> 32);
+    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
+    return u.llv;
+}
+
+static uint64_t ntohll(uint64_t v) {
+    union { uint32_t lv[2]; uint64_t llv; } u;
+    u.llv = v;
+    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
+}
+
+static void remote_block_to_network(RDMARemoteBlock *rb)
+{
+    rb->remote_host_addr = htonll(rb->remote_host_addr);
+    rb->offset = htonll(rb->offset);
+    rb->length = htonll(rb->length);
+    rb->remote_rkey = htonl(rb->remote_rkey);
+}
+
+static void network_to_remote_block(RDMARemoteBlock *rb)
+{
+    rb->remote_host_addr = ntohll(rb->remote_host_addr);
+    rb->offset = ntohll(rb->offset);
+    rb->length = ntohll(rb->length);
+    rb->remote_rkey = ntohl(rb->remote_rkey);
+}
+
+/*
+ * Virtual address of the above structures used for transmitting
+ * the RAMBlock descriptions at connection-time.
+ * This structure is *not* transmitted.
+ */
+typedef struct RDMALocalBlocks {
+    int nb_blocks;
+    bool     init;             /* main memory init complete */
+    RDMALocalBlock *block;
+} RDMALocalBlocks;
+
+/*
+ * Main data structure for RDMA state.
+ * While there is only one copy of this structure being allocated right now,
+ * this is the place where one would start if you wanted to consider
+ * having more than one RDMA connection open at the same time.
+ */
+typedef struct RDMAContext {
+    char *host;
+    int port;
+
+    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];
+
+    /*
+     * This is used by *_exchange_send() to figure out whether
+     * the initial "READY" message has already been received.
+     * This is because other functions may potentially poll() and detect
+     * the READY message before send() does, in which case we need to
+     * know if it completed.
+     */
+    int control_ready_expected;
+
+    /* number of outstanding writes */
+    int nb_sent;
+
+    /* store info about current buffer so that we can
+       merge it with future sends */
+    uint64_t current_addr;
+    uint64_t current_length;
+    /* index of ram block the current buffer belongs to */
+    int current_index;
+    /* index of the chunk in the current ram block */
+    int current_chunk;
+
+    bool pin_all;
+
+    /*
+     * infiniband-specific variables for opening the device
+     * and maintaining connection state and so forth.
+     *
+     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
+     * cm_id->verbs, cm_id->channel, and cm_id->qp.
+     */
+    struct rdma_cm_id *cm_id;               /* connection manager ID */
+    struct rdma_cm_id *listen_id;
+    bool connected;
+
+    struct ibv_context          *verbs;
+    struct rdma_event_channel   *channel;
+    struct ibv_qp *qp;                      /* queue pair */
+    struct ibv_comp_channel *comp_channel;  /* completion channel */
+    struct ibv_pd *pd;                      /* protection domain */
+    struct ibv_cq *cq;                      /* completion queue */
+
+    /*
+     * If a previous write failed (perhaps because of a failed
+     * memory registration), then do not attempt any future work
+     * and remember the error state.
+     */
+    int error_state;
+    int error_reported;
+
+    /*
+     * Description of ram blocks used throughout the code.
+     */
+    RDMALocalBlocks local_ram_blocks;
+    RDMARemoteBlock *block;
+
+    /*
+     * Migration on *destination* started.
+     * Then use coroutine yield function.
+     * Source runs in a thread, so we don't care.
+     */
+    int migration_started_on_destination;
+
+    int total_registrations;
+    int total_writes;
+
+    int unregister_current, unregister_next;
+    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];
+
+    GHashTable *blockmap;
+} RDMAContext;
+
+/*
+ * Interface to the rest of the migration call stack.
+ */
+typedef struct QEMUFileRDMA {
+    RDMAContext *rdma;
+    size_t len;
+    void *file;
+} QEMUFileRDMA;
+
+/*
+ * Main structure for IB Send/Recv control messages.
+ * This gets prepended at the beginning of every Send/Recv.
+ */
+typedef struct QEMU_PACKED {
+    uint32_t len;     /* Total length of data portion */
+    uint32_t type;    /* which control command to perform */
+    uint32_t repeat;  /* number of commands in data portion of same type */
+    uint32_t padding;
+} RDMAControlHeader;
+
+static void control_to_network(RDMAControlHeader *control)
+{
+    control->type = htonl(control->type);
+    control->len = htonl(control->len);
+    control->repeat = htonl(control->repeat);
+}
+
+static void network_to_control(RDMAControlHeader *control)
+{
+    control->type = ntohl(control->type);
+    control->len = ntohl(control->len);
+    control->repeat = ntohl(control->repeat);
+}
+
+/*
+ * Register a single Chunk.
+ * Information sent by the source VM to inform the dest
+ * to register an single chunk of memory before we can perform
+ * the actual RDMA operation.
+ */
+typedef struct QEMU_PACKED {
+    union QEMU_PACKED {
+        uint64_t current_addr;  /* offset into the ramblock of the chunk */
+        uint64_t chunk;         /* chunk to lookup if unregistering */
+    } key;
+    uint32_t current_index; /* which ramblock the chunk belongs to */
+    uint32_t padding;
+    uint64_t chunks;            /* how many sequential chunks to register */
+} RDMARegister;
+
+static void register_to_network(RDMARegister *reg)
+{
+    reg->key.current_addr = htonll(reg->key.current_addr);
+    reg->current_index = htonl(reg->current_index);
+    reg->chunks = htonll(reg->chunks);
+}
+
+static void network_to_register(RDMARegister *reg)
+{
+    reg->key.current_addr = ntohll(reg->key.current_addr);
+    reg->current_index = ntohl(reg->current_index);
+    reg->chunks = ntohll(reg->chunks);
+}
+
+typedef struct QEMU_PACKED {
+    uint32_t value;     /* if zero, we will madvise() */
+    uint32_t block_idx; /* which ram block index */
+    uint64_t offset;    /* where in the remote ramblock this chunk starts */
+    uint64_t length;    /* length of the chunk */
+} RDMACompress;
+
+static void compress_to_network(RDMACompress *comp)
+{
+    comp->value = htonl(comp->value);
+    comp->block_idx = htonl(comp->block_idx);
+    comp->offset = htonll(comp->offset);
+    comp->length = htonll(comp->length);
+}
+
+static void network_to_compress(RDMACompress *comp)
+{
+    comp->value = ntohl(comp->value);
+    comp->block_idx = ntohl(comp->block_idx);
+    comp->offset = ntohll(comp->offset);
+    comp->length = ntohll(comp->length);
+}
+
+/*
+ * The result of the dest's memory registration produces an "rkey"
+ * which the source VM must reference in order to perform
+ * the RDMA operation.
+ */
+typedef struct QEMU_PACKED {
+    uint32_t rkey;
+    uint32_t padding;
+    uint64_t host_addr;
+} RDMARegisterResult;
+
+static void result_to_network(RDMARegisterResult *result)
+{
+    result->rkey = htonl(result->rkey);
+    result->host_addr = htonll(result->host_addr);
+};
+
+static void network_to_result(RDMARegisterResult *result)
+{
+    result->rkey = ntohl(result->rkey);
+    result->host_addr = ntohll(result->host_addr);
+};
+
+const char *print_wrid(int wrid);
+static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
+                                   uint8_t *data, RDMAControlHeader *resp,
+                                   int *resp_idx,
+                                   int (*callback)(RDMAContext *rdma));
+
+static inline uint64_t ram_chunk_index(const uint8_t *start,
+                                       const uint8_t *host)
+{
+    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
+}
+
+static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
+                                       uint64_t i)
+{
+    return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
+                                    + (i << RDMA_REG_CHUNK_SHIFT));
+}
+
+static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
+                                     uint64_t i)
+{
+    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
+                                         (1UL << RDMA_REG_CHUNK_SHIFT);
+
+    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
+        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
+    }
+
+    return result;
+}
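
The three chunk helpers above are plain shift arithmetic over
RDMA_REG_CHUNK_SHIFT (1 MB chunks), with the end of the last chunk clamped to
the block length. Worked through for a hypothetical 3.5 MB block:

    #include <stdint.h>

    #define CHUNK_SHIFT 20 /* RDMA_REG_CHUNK_SHIFT: 1 MB chunks */

    static uint64_t chunk_index(uintptr_t start, uintptr_t host)
    {
        return (host - start) >> CHUNK_SHIFT;
    }
    /* For a block of length 3.5 MB starting at address S:
     *   chunk_index(S, S + length) == 3, so nb_chunks == 3 + 1 == 4,
     *   and chunk 3 ends at S + length, not at a full 1 MB boundary. */
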
+
+static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr,
+                         ram_addr_t block_offset, uint64_t length)
+{
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
+        (void *) block_offset);
+    RDMALocalBlock *old = local->block;
+
+    assert(block == NULL);
+
+    local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));
+
+    if (local->nb_blocks) {
+        int x;
+
+        for (x = 0; x < local->nb_blocks; x++) {
+            g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
+            g_hash_table_insert(rdma->blockmap, (void *)old[x].offset,
+                                                &local->block[x]);
+        }
+        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
+        g_free(old);
+    }
+
+    block = &local->block[local->nb_blocks];
+
+    block->local_host_addr = host_addr;
+    block->offset = block_offset;
+    block->length = length;
+    block->index = local->nb_blocks;
+    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
+    block->transit_bitmap = bitmap_new(block->nb_chunks);
+    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
+    block->unregister_bitmap = bitmap_new(block->nb_chunks);
+    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
+    block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));
+
+    block->is_ram_block = local->init ? false : true;
+
+    g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);
+
+    DDPRINTF("Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
+           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
+            local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
+            block->length, (uint64_t) (block->local_host_addr + block->length),
+                BITS_TO_LONGS(block->nb_chunks) *
+                    sizeof(unsigned long) * 8, block->nb_chunks);
+
+    local->nb_blocks++;
+
+    return 0;
+}
+
+/*
+ * Memory regions need to be registered with the device and queue pairs set up
+ * in advance before the migration starts. This tells us where the RAM blocks
+ * are so that we can register them individually.
+ */
+static void qemu_rdma_init_one_block(void *host_addr,
+    ram_addr_t block_offset, ram_addr_t length, void *opaque)
+{
+    __qemu_rdma_add_block(opaque, host_addr, block_offset, length);
+}
+
+/*
+ * Identify the RAMBlocks and their quantity. They will be used to
+ * identify chunk boundaries inside each RAMBlock and will also be
+ * referenced during dynamic page registration.
+ */
+static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
+{
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+
+    assert(rdma->blockmap == NULL);
+    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
+    memset(local, 0, sizeof *local);
+    qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
+    DPRINTF("Allocated %d local ram block structures\n", local->nb_blocks);
+    rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
+                        rdma->local_ram_blocks.nb_blocks);
+    local->init = true;
+    return 0;
+}
+
+static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
+{
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
+        (void *) block_offset);
+    RDMALocalBlock *old = local->block;
+    int x;
+
+    assert(block);
+
+    if (block->pmr) {
+        int j;
+
+        for (j = 0; j < block->nb_chunks; j++) {
+            if (!block->pmr[j]) {
+                continue;
+            }
+            ibv_dereg_mr(block->pmr[j]);
+            rdma->total_registrations--;
+        }
+        g_free(block->pmr);
+        block->pmr = NULL;
+    }
+
+    if (block->mr) {
+        ibv_dereg_mr(block->mr);
+        rdma->total_registrations--;
+        block->mr = NULL;
+    }
+
+    g_free(block->transit_bitmap);
+    block->transit_bitmap = NULL;
+
+    g_free(block->unregister_bitmap);
+    block->unregister_bitmap = NULL;
+
+    g_free(block->remote_keys);
+    block->remote_keys = NULL;
+
+    for (x = 0; x < local->nb_blocks; x++) {
+        g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
+    }
+
+    if (local->nb_blocks > 1) {
+
+        local->block = g_malloc0(sizeof(RDMALocalBlock) *
+                                    (local->nb_blocks - 1));
+
+        if (block->index) {
+            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
+        }
+
+        if (block->index < (local->nb_blocks - 1)) {
+            memcpy(local->block + block->index, old + (block->index + 1),
+                sizeof(RDMALocalBlock) *
+                    (local->nb_blocks - (block->index + 1)));
+        }
+    } else {
+        assert(block == local->block);
+        local->block = NULL;
+    }
+
+    DDPRINTF("Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
+           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
+            local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
+            block->length, (uint64_t) (block->local_host_addr + block->length),
+                BITS_TO_LONGS(block->nb_chunks) *
+                    sizeof(unsigned long) * 8, block->nb_chunks);
+
+    g_free(old);
+
+    local->nb_blocks--;
+
+    if (local->nb_blocks) {
+        for (x = 0; x < local->nb_blocks; x++) {
+            g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset,
+                                                &local->block[x]);
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Put in the log file which RDMA device was opened and the details
+ * associated with that device.
+ */
+static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
+{
+    struct ibv_port_attr port;
+
+    if (ibv_query_port(verbs, 1, &port)) {
+        fprintf(stderr, "FAILED TO QUERY PORT INFORMATION!\n");
+        return;
+    }
+
+    printf("%s RDMA Device opened: kernel name %s "
+           "uverbs device name %s, "
+           "infiniband_verbs class device path %s, "
+           "infiniband class device path %s, "
+           "transport: (%d) %s\n",
+                who,
+                verbs->device->name,
+                verbs->device->dev_name,
+                verbs->device->dev_path,
+                verbs->device->ibdev_path,
+                port.link_layer,
+                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
+                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET) 
+                    ? "Ethernet" : "Unknown"));
+}
+
+/*
+ * Put in the log file the RDMA gid addressing information,
+ * useful for folks who have trouble understanding the
+ * RDMA device hierarchy in the kernel.
+ */
+static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
+{
+    char sgid[33];
+    char dgid[33];
+    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
+    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
+    DPRINTF("%s Source GID: %s, Dest GID: %s\n", who, sgid, dgid);
+}
+
+/*
+ * As of now, IPv6 over RoCE / iWARP is not supported by linux.
+ * We will try the next addrinfo struct, and fail if there are
+ * no other valid addresses to bind against.
+ *
+ * If the user is listening on '[::]', then we will not have opened a device
+ * yet and have no way of verifying if the device is RoCE or not.
+ *
+ * In this case, the source VM will throw an error for ALL types of
+ * connections (both IPv4 and IPv6) if the destination machine does not have
+ * a regular infiniband network available for use.
+ *
+ * The only way to guarantee that an error is thrown for broken kernels is
+ * for the management software to choose a *specific* interface at bind time
+ * and validate what type of hardware it is.
+ *
+ * Unfortunately, this puts the user in a fix:
+ *
+ *  If the source VM connects with an IPv4 address without knowing that the
+ *  destination has bound to '[::]' the migration will unconditionally fail
+ *  unless the management software is explicitly listening on the IPv4
+ *  address while using a RoCE-based device.
+ *
+ *  If the source VM connects with an IPv6 address, then we're OK because we
+ *  can throw an error on the source (and similarly on the destination).
+ *
+ *  But in mixed environments, this will be broken for a while until it is
+ *  fixed inside linux.
+ *
+ * We do provide a *tiny* bit of help in this function: We can list all of the
+ * devices in the system and check to see if all the devices are RoCE or
+ * Infiniband.
+ *
+ * If we detect that we have a *pure* RoCE environment, then we can safely
+ * throw an error even if the management software has specified '[::]' as the
+ * bind address.
+ *
+ * However, if there are multiple heterogeneous devices, then we cannot make
+ * this assumption and the user just has to be sure they know what they are
+ * doing.
+ *
+ * Patches are being reviewed on linux-rdma.
+ */
+static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
+{
+    struct ibv_port_attr port_attr;
+
+    /* This bug only exists in linux, to our knowledge. */
+#ifdef CONFIG_LINUX
+
+    /*
+     * Verbs are only NULL if management has bound to '[::]'.
+     *
+     * Let's iterate through all the devices and see if there are any pure IB
+     * devices (non-ethernet).
+     *
+     * If not, then we can safely proceed with the migration.
+     * Otherwise, there are no guarantees until the bug is fixed in linux.
+     */
+    if (!verbs) {
+        int num_devices, x;
+        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
+        bool roce_found = false;
+        bool ib_found = false;
+
+        for (x = 0; x < num_devices; x++) {
+            verbs = ibv_open_device(dev_list[x]);
+
+            if (ibv_query_port(verbs, 1, &port_attr)) {
+                ibv_close_device(verbs);
+                ERROR(errp, "Could not query initial IB port");
+                return -EINVAL;
+            }
+
+            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
+                ib_found = true;
+            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
+                roce_found = true;
+            }
+
+            ibv_close_device(verbs);
+
+        }
+
+        if (roce_found) {
+            if (ib_found) {
+                fprintf(stderr, "WARN: migrations may fail:"
+                                " IPv6 over RoCE / iWARP in linux"
+                                " is broken. But since you appear to have a"
+                                " mixed RoCE / IB environment, be sure to only"
+                                " migrate over the IB fabric until the kernel "
+                                " fixes the bug.\n");
+            } else {
+                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
+                            " and your management software has specified '[::]'"
+                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
+                return -ENONET;
+            }
+        }
+
+        return 0;
+    }
+
+    /*
+     * If we have a verbs context, that means that something other than '[::]'
+     * was used by the management software for binding, in which case we can
+     * actually warn the user about a potentially broken kernel.
+     */
+
+    /* IB ports start with 1, not 0 */
+    if (ibv_query_port(verbs, 1, &port_attr)) {
+        ERROR(errp, "Could not query initial IB port");
+        return -EINVAL;
+    }
+
+    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
+        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
+                    "(but patches on linux-rdma in progress)");
+        return -ENONET;
+    }
+
+#endif
+
+    return 0;
+}
+
+/*
+ * Figure out which RDMA device corresponds to the requested IP hostname
+ * Also create the initial connection manager identifiers for opening
+ * the connection.
+ */
+static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
+{
+    int ret;
+    struct rdma_addrinfo *res;
+    char port_str[16];
+    struct rdma_cm_event *cm_event;
+    char ip[40] = "unknown";
+    struct rdma_addrinfo *e;
+
+    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
+        ERROR(errp, "RDMA hostname has not been set");
+        return -EINVAL;
+    }
+
+    /* create CM channel */
+    rdma->channel = rdma_create_event_channel();
+    if (!rdma->channel) {
+        ERROR(errp, "could not create CM channel");
+        return -EINVAL;
+    }
+
+    /* create CM id */
+    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
+    if (ret) {
+        ERROR(errp, "could not create channel id");
+        goto err_resolve_create_id;
+    }
+
+    snprintf(port_str, 16, "%d", rdma->port);
+    port_str[15] = '\0';
+
+    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
+    if (ret < 0) {
+        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
+        goto err_resolve_get_addr;
+    }
+
+    for (e = res; e != NULL; e = e->ai_next) {
+        inet_ntop(e->ai_family,
+            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
+        DPRINTF("Trying %s => %s\n", rdma->host, ip);
+
+        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
+                RDMA_RESOLVE_TIMEOUT_MS);
+        if (!ret) {
+            if (e->ai_family == AF_INET6) {
+                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
+                if (ret) {
+                    continue;
+                }
+            }
+            goto route;
+        }
+    }
+
+    ERROR(errp, "could not resolve address %s", rdma->host);
+    goto err_resolve_get_addr;
+
+route:
+    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);
+
+    ret = rdma_get_cm_event(rdma->channel, &cm_event);
+    if (ret) {
+        ERROR(errp, "could not perform event_addr_resolved");
+        goto err_resolve_get_addr;
+    }
+
+    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
+        ERROR(errp, "result not equal to event_addr_resolved %s",
+                rdma_event_str(cm_event->event));
+        perror("rdma_resolve_addr");
+        rdma_ack_cm_event(cm_event);
+        ret = -EINVAL;
+        goto err_resolve_get_addr;
+    }
+    rdma_ack_cm_event(cm_event);
+
+    /* resolve route */
+    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
+    if (ret) {
+        ERROR(errp, "could not resolve rdma route");
+        goto err_resolve_get_addr;
+    }
+
+    ret = rdma_get_cm_event(rdma->channel, &cm_event);
+    if (ret) {
+        ERROR(errp, "could not perform event_route_resolved");
+        goto err_resolve_get_addr;
+    }
+    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
+        ERROR(errp, "result not equal to event_route_resolved: %s",
+                        rdma_event_str(cm_event->event));
+        rdma_ack_cm_event(cm_event);
+        ret = -EINVAL;
+        goto err_resolve_get_addr;
+    }
+    rdma_ack_cm_event(cm_event);
+    rdma->verbs = rdma->cm_id->verbs;
+    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
+    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
+    return 0;
+
+err_resolve_get_addr:
+    rdma_destroy_id(rdma->cm_id);
+    rdma->cm_id = NULL;
+err_resolve_create_id:
+    rdma_destroy_event_channel(rdma->channel);
+    rdma->channel = NULL;
+    return ret;
+}
+
+/*
+ * Create protection domain and completion queues
+ */
+static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
+{
+    /* allocate pd */
+    rdma->pd = ibv_alloc_pd(rdma->verbs);
+    if (!rdma->pd) {
+        fprintf(stderr, "failed to allocate protection domain\n");
+        return -1;
+    }
+
+    /* create completion channel */
+    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
+    if (!rdma->comp_channel) {
+        fprintf(stderr, "failed to allocate completion channel\n");
+        goto err_alloc_pd_cq;
+    }
+
+    /*
+     * Completion queue can be filled by both read and write work requests,
+     * so must reflect the sum of both possible queue sizes.
+     */
+    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
+            NULL, rdma->comp_channel, 0);
+    if (!rdma->cq) {
+        fprintf(stderr, "failed to allocate completion queue\n");
+        goto err_alloc_pd_cq;
+    }
+
+    return 0;
+
+err_alloc_pd_cq:
+    if (rdma->pd) {
+        ibv_dealloc_pd(rdma->pd);
+    }
+    if (rdma->comp_channel) {
+        ibv_destroy_comp_channel(rdma->comp_channel);
+    }
+    rdma->pd = NULL;
+    rdma->comp_channel = NULL;
+    return -1;
+}
+
+/*
+ * Create queue pairs.
+ */
+static int qemu_rdma_alloc_qp(RDMAContext *rdma)
+{
+    struct ibv_qp_init_attr attr = { 0 };
+    int ret;
+
+    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
+    attr.cap.max_recv_wr = 3;
+    attr.cap.max_send_sge = 1;
+    attr.cap.max_recv_sge = 1;
+    attr.send_cq = rdma->cq;
+    attr.recv_cq = rdma->cq;
+    attr.qp_type = IBV_QPT_RC;
+
+    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
+    if (ret) {
+        return -1;
+    }
+
+    rdma->qp = rdma->cm_id->qp;
+    return 0;
+}
+
+static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
+{
+    int i;
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+
+    for (i = 0; i < local->nb_blocks; i++) {
+        local->block[i].mr =
+            ibv_reg_mr(rdma->pd,
+                    local->block[i].local_host_addr,
+                    local->block[i].length,
+                    IBV_ACCESS_LOCAL_WRITE |
+                    IBV_ACCESS_REMOTE_WRITE
+                    );
+        if (!local->block[i].mr) {
+            perror("Failed to register local dest ram block");
+            break;
+        }
+        rdma->total_registrations++;
+    }
+
+    if (i >= local->nb_blocks) {
+        return 0;
+    }
+
+    for (i--; i >= 0; i--) {
+        ibv_dereg_mr(local->block[i].mr);
+        rdma->total_registrations--;
+    }
+
+    return -1;
+}
+
+/*
+ * Find the ram block that corresponds to the page requested to be
+ * transmitted by QEMU.
+ *
+ * Once the block is found, also identify which 'chunk' within that
+ * block that the page belongs to.
+ *
+ * This search cannot fail or the migration will fail.
+ */
+static int qemu_rdma_search_ram_block(RDMAContext *rdma,
+                                      uint64_t block_offset,
+                                      uint64_t offset,
+                                      uint64_t length,
+                                      uint64_t *block_index,
+                                      uint64_t *chunk_index)
+{
+    uint64_t current_addr = block_offset + offset;
+    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
+                                                (void *) block_offset);
+    assert(block);
+    assert(current_addr >= block->offset);
+    assert((current_addr + length) <= (block->offset + block->length));
+
+    *block_index = block->index;
+    *chunk_index = ram_chunk_index(block->local_host_addr,
+                block->local_host_addr + (current_addr - block->offset));
+
+    return 0;
+}
+
+/*
+ * Register a chunk with IB. If the chunk was already registered
+ * previously, then skip.
+ *
+ * Also return the keys associated with the registration needed
+ * to perform the actual RDMA operation.
+ */
+static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
+        RDMALocalBlock *block, uint8_t *host_addr,
+        uint32_t *lkey, uint32_t *rkey, int chunk,
+        uint8_t *chunk_start, uint8_t *chunk_end)
+{
+    if (block->mr) {
+        if (lkey) {
+            *lkey = block->mr->lkey;
+        }
+        if (rkey) {
+            *rkey = block->mr->rkey;
+        }
+        return 0;
+    }
+
+    /* allocate memory to store chunk MRs */
+    if (!block->pmr) {
+        block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
+        if (!block->pmr) {
+            return -1;
+        }
+    }
+
+    /*
+     * If 'rkey', then we're the destination, so grant access to the source.
+     *
+     * If 'lkey', then we're the source VM, so grant access only to ourselves.
+     */
+    if (!block->pmr[chunk]) {
+        uint64_t len = chunk_end - chunk_start;
+
+        DDPRINTF("Registering %" PRIu64 " bytes @ %p\n",
+                 len, chunk_start);
+
+        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
+                chunk_start, len,
+                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
+                        IBV_ACCESS_REMOTE_WRITE) : 0));
+
+        if (!block->pmr[chunk]) {
+            perror("Failed to register chunk!");
+            fprintf(stderr, "Chunk details: block: %d chunk index %d"
+                            " start %" PRIu64 " end %" PRIu64 " host %" PRIu64
+                            " local %" PRIu64 " registrations: %d\n",
+                            block->index, chunk, (uint64_t) chunk_start,
+                            (uint64_t) chunk_end, (uint64_t) host_addr,
+                            (uint64_t) block->local_host_addr,
+                            rdma->total_registrations);
+            return -1;
+        }
+        rdma->total_registrations++;
+    }
+
+    if (lkey) {
+        *lkey = block->pmr[chunk]->lkey;
+    }
+    if (rkey) {
+        *rkey = block->pmr[chunk]->rkey;
+    }
+    return 0;
+}
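+
+/*
+ * For illustration (a sketch, mirroring the calls made elsewhere in this
+ * file): the source asks only for an lkey, granting local access for the
+ * RDMA write it is about to post, while the destination asks only for an
+ * rkey, which it hands back to the source for remote access:
+ *
+ *     // source side, before posting an RDMA write:
+ *     qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *) sge.addr,
+ *                                     &sge.lkey, NULL, chunk,
+ *                                     chunk_start, chunk_end);
+ *
+ *     // destination side, while answering a registration request:
+ *     qemu_rdma_register_and_get_keys(rdma, block, host_addr,
+ *                                     NULL, &rkey, chunk,
+ *                                     chunk_start, chunk_end);
+ */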
+
+/*
+ * Register (at connection time) the memory used for control
+ * channel messages.
+ */
+static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
+{
+    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
+            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
+            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+    if (rdma->wr_data[idx].control_mr) {
+        rdma->total_registrations++;
+        return 0;
+    }
+    fprintf(stderr, "qemu_rdma_reg_control failed!\n");
+    return -1;
+}
+
+const char *print_wrid(int wrid)
+{
+    if (wrid >= RDMA_WRID_RECV_CONTROL) {
+        return wrid_desc[RDMA_WRID_RECV_CONTROL];
+    }
+    return wrid_desc[wrid];
+}
+
+/*
+ * RDMA requires memory registration (mlock/pinning), but this is not good for
+ * overcommitment.
+ *
+ * In preparation for the future where LRU information or workload-specific
+ * writable working set memory access behavior is available to QEMU,
+ * it would be nice to have in place the ability to UN-register/UN-pin
+ * particular memory regions from the RDMA hardware when it is determined that
+ * those regions of memory will likely not be accessed again in the near
+ * future.
+ *
+ * While we do not yet have such information right now, the following
+ * compile-time option allows us to perform a non-optimized version of this
+ * behavior.
+ *
+ * By uncommenting this option, you will cause *all* RDMA transfers to be
+ * unregistered immediately after the transfer completes on both sides of the
+ * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
+ *
+ * This will have a terrible impact on migration performance, so until future
+ * workload information or LRU information is available, do not attempt to use
+ * this feature except for basic testing.
+ */
+//#define RDMA_UNREGISTRATION_EXAMPLE
+
+/*
+ * Perform a non-optimized memory unregistration after every transfer
+ * for demonstration purposes, only if pin-all is not requested.
+ *
+ * Potential optimizations:
+ * 1. Start a new thread to run this function continuously
+ *      - for bit clearing
+ *      - and for receipt of unregister messages
+ * 2. Use an LRU.
+ * 3. Use workload hints.
+ */
+static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
+{
+    while (rdma->unregistrations[rdma->unregister_current]) {
+        int ret;
+        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
+        uint64_t chunk =
+            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
+        uint64_t index =
+            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
+        RDMALocalBlock *block =
+            &(rdma->local_ram_blocks.block[index]);
+        RDMARegister reg = { .current_index = index };
+        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
+                                 };
+        RDMAControlHeader head = { .len = sizeof(RDMARegister),
+                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
+                                   .repeat = 1,
+                                 };
+
+        DDPRINTF("Processing unregister for chunk: %" PRIu64
+                 " at position %d\n", chunk, rdma->unregister_current);
+
+        rdma->unregistrations[rdma->unregister_current] = 0;
+        rdma->unregister_current++;
+
+        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
+            rdma->unregister_current = 0;
+        }
+
+        /*
+         * Unregistration is speculative (because migration is single-threaded
+         * and we cannot break the protocol's InfiniBand message ordering).
+         * Thus, if the memory is currently being used for transmission,
+         * then abort the attempt to unregister and try again
+         * later the next time a completion is received for this memory.
+         */
+        clear_bit(chunk, block->unregister_bitmap);
+
+        if (test_bit(chunk, block->transit_bitmap)) {
+            DDPRINTF("Cannot unregister inflight chunk: %" PRIu64 "\n", chunk);
+            continue;
+        }
+
+        DDPRINTF("Sending unregister for chunk: %" PRIu64 "\n", chunk);
+
+        ret = ibv_dereg_mr(block->pmr[chunk]);
+        block->pmr[chunk] = NULL;
+        block->remote_keys[chunk] = 0;
+
+        if (ret != 0) {
+            perror("unregistration chunk failed");
+            return -ret;
+        }
+        rdma->total_registrations--;
+
+        reg.key.chunk = chunk;
+        register_to_network(&reg);
+        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
+                                &resp, NULL, NULL);
+        if (ret < 0) {
+            return ret;
+        }
+
+        DDPRINTF("Unregister for chunk: %" PRIu64 " complete.\n", chunk);
+    }
+
+    return 0;
+}
+
+static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
+                                         uint64_t chunk)
+{
+    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;
+
+    result |= (index << RDMA_WRID_BLOCK_SHIFT);
+    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);
+
+    return result;
+}
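+
+/*
+ * For illustration, the inverse operation (a sketch; qemu_rdma_poll()
+ * open-codes the same unpacking using the RDMA_WRID_* masks and shifts
+ * defined earlier in this file):
+ *
+ *     uint64_t type  = wr_id & RDMA_WRID_TYPE_MASK;
+ *     uint64_t index = (wr_id & RDMA_WRID_BLOCK_MASK)
+ *                          >> RDMA_WRID_BLOCK_SHIFT;
+ *     uint64_t chunk = (wr_id & RDMA_WRID_CHUNK_MASK)
+ *                          >> RDMA_WRID_CHUNK_SHIFT;
+ */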
+
+/*
+ * Set bit for unregistration in the next iteration.
+ * We cannot transmit right here, but will unpin later.
+ */
+static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
+                                        uint64_t chunk, uint64_t wr_id)
+{
+    if (rdma->unregistrations[rdma->unregister_next] != 0) {
+        fprintf(stderr, "rdma migration: queue is full!\n");
+    } else {
+        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
+
+        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
+            DDPRINTF("Appending unregister chunk %" PRIu64
+                    " at position %d\n", chunk, rdma->unregister_next);
+
+            rdma->unregistrations[rdma->unregister_next++] =
+                    qemu_rdma_make_wrid(wr_id, index, chunk);
+
+            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
+                rdma->unregister_next = 0;
+            }
+        } else {
+            DDPRINTF("Unregister chunk %" PRIu64 " already in queue.\n",
+                    chunk);
+        }
+    }
+}
+
+/*
+ * Poll the completion queue to see if a work request
+ * (of any kind) has completed.
+ * Return the work request ID that completed.
+ */
+static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
+                               uint32_t *byte_len)
+{
+    int ret;
+    struct ibv_wc wc;
+    uint64_t wr_id;
+
+    ret = ibv_poll_cq(rdma->cq, 1, &wc);
+
+    if (!ret) {
+        *wr_id_out = RDMA_WRID_NONE;
+        return 0;
+    }
+
+    if (ret < 0) {
+        fprintf(stderr, "ibv_poll_cq return %d!\n", ret);
+        return ret;
+    }
+
+    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;
+
+    if (wc.status != IBV_WC_SUCCESS) {
+        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
+                        wc.status, ibv_wc_status_str(wc.status));
+        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);
+
+        return -1;
+    }
+
+    if (rdma->control_ready_expected &&
+        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
+        DDDPRINTF("completion %s #%" PRId64 " received (%" PRId64 ")"
+                  " left %d\n", wrid_desc[RDMA_WRID_RECV_CONTROL],
+                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
+        rdma->control_ready_expected = 0;
+    }
+
+    if (wr_id == RDMA_WRID_RDMA_WRITE) {
+        uint64_t chunk =
+            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
+        uint64_t index =
+            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
+        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);
+
+        DDDPRINTF("completions %s (%" PRId64 ") left %d, "
+                 "block %" PRIu64 ", chunk: %" PRIu64 " %p %p\n",
+                 print_wrid(wr_id), wr_id, rdma->nb_sent, index, chunk,
+                 block->local_host_addr, (void *)block->remote_host_addr);
+
+        clear_bit(chunk, block->transit_bitmap);
+
+        if (rdma->nb_sent > 0) {
+            rdma->nb_sent--;
+        }
+
+        if (!rdma->pin_all) {
+            /*
+             * FYI: If one wanted to signal a specific chunk to be unregistered
+             * using LRU or workload-specific information, this is the function
+             * you would call to do so. That chunk would then get asynchronously
+             * unregistered later.
+             */
+#ifdef RDMA_UNREGISTRATION_EXAMPLE
+            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
+#endif
+        }
+    } else {
+        DDDPRINTF("other completion %s (%" PRId64 ") received left %d\n",
+            print_wrid(wr_id), wr_id, rdma->nb_sent);
+    }
+
+    *wr_id_out = wc.wr_id;
+    if (byte_len) {
+        *byte_len = wc.byte_len;
+    }
+
+    return 0;
+}
+
+/*
+ * Block until the next work request has completed.
+ *
+ * First poll to see if a work request has already completed,
+ * otherwise block.
+ *
+ * If we encounter completed work requests for IDs other than
+ * the one we're interested in, then that's generally an error.
+ *
+ * The only exception is actual RDMA Write completions. These
+ * completions only need to be recorded, but do not actually
+ * need further processing.
+ */
+static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
+                                    uint32_t *byte_len)
+{
+    int num_cq_events = 0, ret = 0;
+    struct ibv_cq *cq;
+    void *cq_ctx;
+    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;
+
+    if (ibv_req_notify_cq(rdma->cq, 0)) {
+        return -1;
+    }
+    /* poll cq first */
+    while (wr_id != wrid_requested) {
+        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
+        if (ret < 0) {
+            return ret;
+        }
+
+        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
+
+        if (wr_id == RDMA_WRID_NONE) {
+            break;
+        }
+        if (wr_id != wrid_requested) {
+            DDDPRINTF("A Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
+                print_wrid(wrid_requested),
+                wrid_requested, print_wrid(wr_id), wr_id);
+        }
+    }
+
+    if (wr_id == wrid_requested) {
+        return 0;
+    }
+
+    while (1) {
+        /*
+         * Coroutine doesn't start until process_incoming_migration()
+         * so don't yield unless we know we're running inside of a coroutine.
+         */
+        if (rdma->migration_started_on_destination) {
+            yield_until_fd_readable(rdma->comp_channel->fd);
+        }
+
+        if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) {
+            perror("ibv_get_cq_event");
+            goto err_block_for_wrid;
+        }
+
+        num_cq_events++;
+
+        if (ibv_req_notify_cq(cq, 0)) {
+            goto err_block_for_wrid;
+        }
+
+        while (wr_id != wrid_requested) {
+            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
+            if (ret < 0) {
+                goto err_block_for_wrid;
+            }
+
+            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
+
+            if (wr_id == RDMA_WRID_NONE) {
+                break;
+            }
+            if (wr_id != wrid_requested) {
+                DDDPRINTF("B Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n",
+                    print_wrid(wrid_requested), wrid_requested,
+                    print_wrid(wr_id), wr_id);
+            }
+        }
+
+        if (wr_id == wrid_requested) {
+            goto success_block_for_wrid;
+        }
+    }
+
+success_block_for_wrid:
+    if (num_cq_events) {
+        ibv_ack_cq_events(cq, num_cq_events);
+    }
+    return 0;
+
+err_block_for_wrid:
+    if (num_cq_events) {
+        ibv_ack_cq_events(cq, num_cq_events);
+    }
+    return ret;
+}
+
+/*
+ * Post a SEND message work request for the control channel
+ * containing some data and block until the post completes.
+ */
+static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
+                                       RDMAControlHeader *head)
+{
+    int ret = 0;
+    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
+    struct ibv_send_wr *bad_wr;
+    struct ibv_sge sge = {
+                           .addr = (uint64_t)(wr->control),
+                           .length = head->len + sizeof(RDMAControlHeader),
+                           .lkey = wr->control_mr->lkey,
+                         };
+    struct ibv_send_wr send_wr = {
+                                   .wr_id = RDMA_WRID_SEND_CONTROL,
+                                   .opcode = IBV_WR_SEND,
+                                   .send_flags = IBV_SEND_SIGNALED,
+                                   .sg_list = &sge,
+                                   .num_sge = 1,
+                                };
+
+    DDDPRINTF("CONTROL: sending %s..\n", control_desc[head->type]);
+
+    /*
+     * We don't actually need to do a memcpy() in here if we used
+     * the "sge" properly, but since we're only sending control messages
+     * (not RAM in a performance-critical path), then it's OK for now.
+     *
+     * The copy makes the RDMAControlHeader simpler to manipulate
+     * for the time being.
+     */
+    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
+    memcpy(wr->control, head, sizeof(RDMAControlHeader));
+    control_to_network((void *) wr->control);
+
+    if (buf) {
+        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
+    }
+
+    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
+
+    if (ret > 0) {
+        fprintf(stderr, "Failed to post IB SEND for control!\n");
+        return -ret;
+    }
+
+    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
+    if (ret < 0) {
+        fprintf(stderr, "rdma migration: send polling control error!\n");
+    }
+
+    return ret;
+}
+
+/*
+ * Post a RECV work request in anticipation of some future receipt
+ * of data on the control channel.
+ */
+static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
+{
+    struct ibv_recv_wr *bad_wr;
+    struct ibv_sge sge = {
+                            .addr = (uint64_t)(rdma->wr_data[idx].control),
+                            .length = RDMA_CONTROL_MAX_BUFFER,
+                            .lkey = rdma->wr_data[idx].control_mr->lkey,
+                         };
+
+    struct ibv_recv_wr recv_wr = {
+                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
+                                    .sg_list = &sge,
+                                    .num_sge = 1,
+                                 };
+
+
+    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Block and wait for a RECV control channel message to arrive.
+ */
+static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
+                RDMAControlHeader *head, int expecting, int idx)
+{
+    uint32_t byte_len;
+    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
+                                       &byte_len);
+
+    if (ret < 0) {
+        fprintf(stderr, "rdma migration: recv polling control error!\n");
+        return ret;
+    }
+
+    network_to_control((void *) rdma->wr_data[idx].control);
+    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
+
+    DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]);
+
+    if (expecting == RDMA_CONTROL_NONE) {
+        DDDPRINTF("Surprise: got %s (%d)\n",
+                  control_desc[head->type], head->type);
+    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
+        fprintf(stderr, "Was expecting a %s (%d) control message"
+                ", but got: %s (%d), length: %d\n",
+                control_desc[expecting], expecting,
+                control_desc[head->type], head->type, head->len);
+        return -EIO;
+    }
+    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
+        fprintf(stderr, "too long length: %d\n", head->len);
+        return -EINVAL;
+    }
+    if (sizeof(*head) + head->len != byte_len) {
+        fprintf(stderr, "Malformed length: %d byte_len %d\n",
+                head->len, byte_len);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * When a RECV work request has completed, the work request's
+ * buffer is pointed at the header.
+ *
+ * This advances the pointer past the header, to the data portion
+ * of the control message in the work request's buffer that
+ * was populated after the work request finished.
+ */
+static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
+                                  RDMAControlHeader *head)
+{
+    rdma->wr_data[idx].control_len = head->len;
+    rdma->wr_data[idx].control_curr =
+        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
+}
+
+/*
+ * This is an 'atomic' high-level operation to deliver a single, unified
+ * control-channel message.
+ *
+ * Additionally, if the user is expecting some kind of reply to this message,
+ * they can request a 'resp' response message be filled in by posting an
+ * additional work request on behalf of the user and waiting for an additional
+ * completion.
+ *
+ * The extra (optional) response is used during registration to save us from
+ * having to perform an *additional* exchange of messages just to provide a
+ * response; instead, the response piggy-backs on the acknowledgement.
+ */
+static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
+                                   uint8_t *data, RDMAControlHeader *resp,
+                                   int *resp_idx,
+                                   int (*callback)(RDMAContext *rdma))
+{
+    int ret = 0;
+
+    /*
+     * Wait until the destination is ready before attempting to deliver the
+     * message, by first waiting for a READY message.
+     */
+    if (rdma->control_ready_expected) {
+        RDMAControlHeader resp;
+        ret = qemu_rdma_exchange_get_response(rdma,
+                                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    /*
+     * If the user is expecting a response, post a WR in anticipation of it.
+     */
+    if (resp) {
+        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
+        if (ret) {
+            fprintf(stderr, "rdma migration: error posting"
+                    " extra control recv for anticipated result!\n");
+            return ret;
+        }
+    }
+
+    /*
+     * Post a WR to replace the one we just consumed for the READY message.
+     */
+    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error posting first control recv!\n");
+        return ret;
+    }
+
+    /*
+     * Deliver the control message that was requested.
+     */
+    ret = qemu_rdma_post_send_control(rdma, data, head);
+
+    if (ret < 0) {
+        fprintf(stderr, "Failed to send control buffer!\n");
+        return ret;
+    }
+
+    /*
+     * If we're expecting a response, block and wait for it.
+     */
+    if (resp) {
+        if (callback) {
+            DDPRINTF("Issuing callback before receiving response...\n");
+            ret = callback(rdma);
+            if (ret < 0) {
+                return ret;
+            }
+        }
+
+        DDPRINTF("Waiting for response %s\n", control_desc[resp->type]);
+        ret = qemu_rdma_exchange_get_response(rdma, resp,
+                                              resp->type, RDMA_WRID_DATA);
+
+        if (ret < 0) {
+            return ret;
+        }
+
+        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
+        if (resp_idx) {
+            *resp_idx = RDMA_WRID_DATA;
+        }
+        DDPRINTF("Response %s received.\n", control_desc[resp->type]);
+    }
+
+    rdma->control_ready_expected = 1;
+
+    return 0;
+}
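+
+/*
+ * Usage sketch (illustrative; this mirrors what qemu_rdma_write_one() does
+ * for RDMA_CONTROL_REGISTER_REQUEST): send a request and receive the reply
+ * piggy-backed on the acknowledgement, avoiding a second round trip:
+ *
+ *     RDMARegister reg = { .current_index = index };
+ *     RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
+ *     RDMAControlHeader head = { .len = sizeof(RDMARegister),
+ *                                .type = RDMA_CONTROL_REGISTER_REQUEST,
+ *                                .repeat = 1 };
+ *     int reg_result_idx;
+ *
+ *     ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
+ *                                   &resp, &reg_result_idx, NULL);
+ *
+ *     // On success, rdma->wr_data[reg_result_idx].control_curr points at
+ *     // the result returned by the destination.
+ */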
+
+/*
+ * This is an 'atomic' high-level operation to receive a single, unified
+ * control-channel message.
+ */
+static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
+                                int expecting)
+{
+    RDMAControlHeader ready = {
+                                .len = 0,
+                                .type = RDMA_CONTROL_READY,
+                                .repeat = 1,
+                              };
+    int ret;
+
+    /*
+     * Inform the source that we're ready to receive a message.
+     */
+    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);
+
+    if (ret < 0) {
+        fprintf(stderr, "Failed to send control buffer!\n");
+        return ret;
+    }
+
+    /*
+     * Block and wait for the message.
+     */
+    ret = qemu_rdma_exchange_get_response(rdma, head,
+                                          expecting, RDMA_WRID_READY);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);
+
+    /*
+     * Post a new RECV work request to replace the one we just consumed.
+     */
+    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error posting second control recv!\n");
+        return ret;
+    }
+
+    return 0;
+}
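+
+/*
+ * Protocol sketch (illustrative): every exchange is a strict lock-step
+ * between the two sides of the control channel:
+ *
+ *     destination                              source
+ *     -----------                              ------
+ *     exchange_recv():                         exchange_send():
+ *       post SEND (READY)           ---->        wait for READY
+ *       wait for message            <----        post SEND (message)
+ *       repost RECV                              repost RECV
+ */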
+
+/*
+ * Write an actual chunk of memory using RDMA.
+ *
+ * If we're using dynamic registration on the dest-side, we have to
+ * send a registration command first.
+ */
+static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
+                               int current_index, uint64_t current_addr,
+                               uint64_t length)
+{
+    struct ibv_sge sge;
+    struct ibv_send_wr send_wr = { 0 };
+    struct ibv_send_wr *bad_wr;
+    int reg_result_idx, ret, count = 0;
+    uint64_t chunk, chunks;
+    uint8_t *chunk_start, *chunk_end;
+    RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
+    RDMARegister reg;
+    RDMARegisterResult *reg_result;
+    RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
+    RDMAControlHeader head = { .len = sizeof(RDMARegister),
+                               .type = RDMA_CONTROL_REGISTER_REQUEST,
+                               .repeat = 1,
+                             };
+
+retry:
+    sge.addr = (uint64_t)(block->local_host_addr +
+                            (current_addr - block->offset));
+    sge.length = length;
+
+    chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr);
+    chunk_start = ram_chunk_start(block, chunk);
+
+    if (block->is_ram_block) {
+        chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
+
+        if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
+            chunks--;
+        }
+    } else {
+        chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
+
+        if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
+            chunks--;
+        }
+    }
+
+    DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n",
+        chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
+
+    chunk_end = ram_chunk_end(block, chunk + chunks);
+
+    if (!rdma->pin_all) {
+#ifdef RDMA_UNREGISTRATION_EXAMPLE
+        qemu_rdma_unregister_waiting(rdma);
+#endif
+    }
+
+    while (test_bit(chunk, block->transit_bitmap)) {
+        (void)count;
+        DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64
+                " current %" PRIu64 " len %" PRIu64 " %d %d\n",
+                count++, current_index, chunk,
+                sge.addr, length, rdma->nb_sent, block->nb_chunks);
+
+        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
+
+        if (ret < 0) {
+            fprintf(stderr, "Failed to wait for previous write to complete "
+                    "block %d chunk %" PRIu64
+                    " current %" PRIu64 " len %" PRIu64 " %d\n",
+                    current_index, chunk, sge.addr, length, rdma->nb_sent);
+            return ret;
+        }
+    }
+
+    if (!rdma->pin_all || !block->is_ram_block) {
+        if (!block->remote_keys[chunk]) {
+            /*
+             * This chunk has not yet been registered, so first check to see
+             * if the entire chunk is zero. If so, tell the other side to
+             * memset() + madvise() the entire chunk without RDMA.
+             */
+
+            if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length)
+                   && buffer_find_nonzero_offset((void *)sge.addr,
+                                                    length) == length) {
+                RDMACompress comp = {
+                                        .offset = current_addr,
+                                        .value = 0,
+                                        .block_idx = current_index,
+                                        .length = length,
+                                    };
+
+                head.len = sizeof(comp);
+                head.type = RDMA_CONTROL_COMPRESS;
+
+                DDPRINTF("Entire chunk is zero, sending compress: %"
+                    PRIu64 " for %d "
+                    "bytes, index: %d, offset: %" PRId64 "...\n",
+                    chunk, sge.length, current_index, current_addr);
+
+                compress_to_network(&comp);
+                ret = qemu_rdma_exchange_send(rdma, &head,
+                                (uint8_t *) &comp, NULL, NULL, NULL);
+
+                if (ret < 0) {
+                    return -EIO;
+                }
+
+                acct_update_position(f, sge.length, true);
+
+                return 1;
+            }
+
+            /*
+             * Otherwise, tell other side to register.
+             */
+            reg.current_index = current_index;
+            if (block->is_ram_block) {
+                reg.key.current_addr = current_addr;
+            } else {
+                reg.key.chunk = chunk;
+            }
+            reg.chunks = chunks;
+
+            DDPRINTF("Sending registration request chunk %" PRIu64 " for %d "
+                    "bytes, index: %d, offset: %" PRId64 "...\n",
+                    chunk, sge.length, current_index, current_addr);
+
+            register_to_network(&reg);
+            ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
+                                    &resp, &reg_result_idx, NULL);
+            if (ret < 0) {
+                return ret;
+            }
+
+            /* try to overlap this single registration with the one we sent. */
+            if (qemu_rdma_register_and_get_keys(rdma, block,
+                                                (uint8_t *) sge.addr,
+                                                &sge.lkey, NULL, chunk,
+                                                chunk_start, chunk_end)) {
+                fprintf(stderr, "cannot get lkey!\n");
+                return -EINVAL;
+            }
+
+            reg_result = (RDMARegisterResult *)
+                    rdma->wr_data[reg_result_idx].control_curr;
+
+            network_to_result(reg_result);
+
+            DDPRINTF("Received registration result:"
+                    " my key: %x their key %x, chunk %" PRIu64 "\n",
+                    block->remote_keys[chunk], reg_result->rkey, chunk);
+
+            block->remote_keys[chunk] = reg_result->rkey;
+            block->remote_host_addr = reg_result->host_addr;
+        } else {
+            /* already registered before */
+            if (qemu_rdma_register_and_get_keys(rdma, block,
+                                                (uint8_t *)sge.addr,
+                                                &sge.lkey, NULL, chunk,
+                                                chunk_start, chunk_end)) {
+                fprintf(stderr, "cannot get lkey!\n");
+                return -EINVAL;
+            }
+        }
+
+        send_wr.wr.rdma.rkey = block->remote_keys[chunk];
+    } else {
+        send_wr.wr.rdma.rkey = block->remote_rkey;
+
+        if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
+                                                     &sge.lkey, NULL, chunk,
+                                                     chunk_start, chunk_end)) {
+            fprintf(stderr, "cannot get lkey!\n");
+            return -EINVAL;
+        }
+    }
+
+    /*
+     * Encode the ram block index and chunk within this wrid.
+     * We will use this information at the time of completion
+     * to figure out which bitmap to check against and then which
+     * chunk in the bitmap to look for.
+     */
+    send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
+                                        current_index, chunk);
+
+    send_wr.opcode = IBV_WR_RDMA_WRITE;
+    send_wr.send_flags = IBV_SEND_SIGNALED;
+    send_wr.sg_list = &sge;
+    send_wr.num_sge = 1;
+    send_wr.wr.rdma.remote_addr = block->remote_host_addr +
+                                (current_addr - block->offset);
+
+    DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx"
+              " remote: %lx, bytes %" PRIu32 "\n",
+              chunk, sge.addr, send_wr.wr.rdma.remote_addr,
+              sge.length);
+
+    /*
+     * ibv_post_send() does not return negative error numbers;
+     * per the specification they are positive - no idea why.
+     */
+    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
+
+    if (ret == ENOMEM) {
+        DDPRINTF("send queue is full. wait a little....\n");
+        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
+        if (ret < 0) {
+            fprintf(stderr, "rdma migration: failed to make "
+                            "room in full send queue! %d\n", ret);
+            return ret;
+        }
+
+        goto retry;
+
+    } else if (ret > 0) {
+        perror("rdma migration: post rdma write failed");
+        return -ret;
+    }
+
+    set_bit(chunk, block->transit_bitmap);
+    acct_update_position(f, sge.length, false);
+    rdma->total_writes++;
+
+    return 0;
+}
+
+/*
+ * Push out any unwritten RDMA operations.
+ *
+ * We support sending out multiple chunks at the same time.
+ * Not all of them need to get signaled in the completion queue.
+ */
+static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
+{
+    int ret;
+
+    if (!rdma->current_length) {
+        return 0;
+    }
+
+    ret = qemu_rdma_write_one(f, rdma,
+            rdma->current_index, rdma->current_addr, rdma->current_length);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (ret == 0) {
+        rdma->nb_sent++;
+        DDDPRINTF("sent total: %d\n", rdma->nb_sent);
+    }
+
+    rdma->current_length = 0;
+    rdma->current_addr = 0;
+
+    return 0;
+}
+
+static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
+                    uint64_t offset, uint64_t len)
+{
+    RDMALocalBlock *block;
+    uint8_t *host_addr;
+    uint8_t *chunk_end;
+
+    if (rdma->current_index < 0) {
+        return 0;
+    }
+
+    if (rdma->current_chunk < 0) {
+        return 0;
+    }
+
+    block = &(rdma->local_ram_blocks.block[rdma->current_index]);
+    host_addr = block->local_host_addr + (offset - block->offset);
+    chunk_end = ram_chunk_end(block, rdma->current_chunk);
+
+    if (rdma->current_length == 0) {
+        return 0;
+    }
+
+    /*
+     * Only merge into chunk sequentially.
+     */
+    if (offset != (rdma->current_addr + rdma->current_length)) {
+        return 0;
+    }
+
+    if (offset < block->offset) {
+        return 0;
+    }
+
+    if ((offset + len) > (block->offset + block->length)) {
+        return 0;
+    }
+
+    if ((host_addr + len) > chunk_end) {
+        return 0;
+    }
+
+    return 1;
+}
+
+/*
+ * We're not actually writing here, but doing three things:
+ *
+ * 1. Identify the chunk the buffer belongs to.
+ * 2. If the chunk is full or the buffer doesn't belong to the current
+ *    chunk, then start a new chunk and flush() the old chunk.
+ * 3. To keep the hardware busy, we also group chunks into batches
+ *    and only require that a batch gets acknowledged in the completion
+ *    queue instead of each individual chunk.
+ */
+static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
+                           uint64_t block_offset, uint64_t offset,
+                           uint64_t len)
+{
+    uint64_t current_addr = block_offset + offset;
+    uint64_t index = rdma->current_index;
+    uint64_t chunk = rdma->current_chunk;
+    int ret;
+
+    /* If we cannot merge it, we flush the current buffer first. */
+    if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
+        ret = qemu_rdma_write_flush(f, rdma);
+        if (ret) {
+            return ret;
+        }
+        rdma->current_length = 0;
+        rdma->current_addr = current_addr;
+
+        ret = qemu_rdma_search_ram_block(rdma, block_offset,
+                                         offset, len, &index, &chunk);
+        if (ret) {
+            fprintf(stderr, "ram block search failed\n");
+            return ret;
+        }
+        rdma->current_index = index;
+        rdma->current_chunk = chunk;
+    }
+
+    /* merge it */
+    rdma->current_length += len;
+
+    /* flush it if buffer is too large */
+    if (rdma->current_length >= RDMA_MERGE_MAX) {
+        return qemu_rdma_write_flush(f, rdma);
+    }
+
+    return 0;
+}
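+
+/*
+ * Worked example (illustrative): three sequential 4 KB pages in the same
+ * chunk are merged into a single 12 KB write; nothing is posted until a
+ * non-mergable buffer arrives or current_length reaches RDMA_MERGE_MAX:
+ *
+ *     qemu_rdma_write(f, rdma, block_offset, 0x0000, 4096); // starts a merge
+ *     qemu_rdma_write(f, rdma, block_offset, 0x1000, 4096); // extends to 8 KB
+ *     qemu_rdma_write(f, rdma, block_offset, 0x2000, 4096); // now 12 KB
+ */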
+
+static void qemu_rdma_cleanup(RDMAContext *rdma)
+{
+    struct rdma_cm_event *cm_event;
+    int ret, idx;
+
+    if (rdma->cm_id && rdma->connected) {
+        if (rdma->error_state) {
+            RDMAControlHeader head = { .len = 0,
+                                       .type = RDMA_CONTROL_ERROR,
+                                       .repeat = 1,
+                                     };
+            fprintf(stderr, "Early error. Sending error.\n");
+            qemu_rdma_post_send_control(rdma, NULL, &head);
+        }
+
+        ret = rdma_disconnect(rdma->cm_id);
+        if (!ret) {
+            DDPRINTF("waiting for disconnect\n");
+            ret = rdma_get_cm_event(rdma->channel, &cm_event);
+            if (!ret) {
+                rdma_ack_cm_event(cm_event);
+            }
+        }
+        DDPRINTF("Disconnected.\n");
+        rdma->connected = false;
+    }
+
+    g_free(rdma->block);
+    rdma->block = NULL;
+
+    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
+        if (rdma->wr_data[idx].control_mr) {
+            rdma->total_registrations--;
+            ibv_dereg_mr(rdma->wr_data[idx].control_mr);
+        }
+        rdma->wr_data[idx].control_mr = NULL;
+    }
+
+    if (rdma->local_ram_blocks.block) {
+        while (rdma->local_ram_blocks.nb_blocks) {
+            __qemu_rdma_delete_block(rdma,
+                    rdma->local_ram_blocks.block->offset);
+        }
+    }
+
+    if (rdma->cq) {
+        ibv_destroy_cq(rdma->cq);
+        rdma->cq = NULL;
+    }
+    if (rdma->comp_channel) {
+        ibv_destroy_comp_channel(rdma->comp_channel);
+        rdma->comp_channel = NULL;
+    }
+    if (rdma->pd) {
+        ibv_dealloc_pd(rdma->pd);
+        rdma->pd = NULL;
+    }
+    if (rdma->listen_id) {
+        rdma_destroy_id(rdma->listen_id);
+        rdma->listen_id = NULL;
+    }
+    if (rdma->cm_id) {
+        if (rdma->qp) {
+            rdma_destroy_qp(rdma->cm_id);
+            rdma->qp = NULL;
+        }
+        rdma_destroy_id(rdma->cm_id);
+        rdma->cm_id = NULL;
+    }
+    if (rdma->channel) {
+        rdma_destroy_event_channel(rdma->channel);
+        rdma->channel = NULL;
+    }
+    g_free(rdma->host);
+    rdma->host = NULL;
+}
+
+
+static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all)
+{
+    int ret, idx;
+    Error *local_err = NULL, **temp = &local_err;
+
+    /*
+     * Will be validated against destination's actual capabilities
+     * after the connect() completes.
+     */
+    rdma->pin_all = pin_all;
+
+    ret = qemu_rdma_resolve_host(rdma, temp);
+    if (ret) {
+        goto err_rdma_source_init;
+    }
+
+    ret = qemu_rdma_alloc_pd_cq(rdma);
+    if (ret) {
+        ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
+                    " limits may be too low. Please check $ ulimit -a # and "
+                    "search for 'ulimit -l' in the output");
+        goto err_rdma_source_init;
+    }
+
+    ret = qemu_rdma_alloc_qp(rdma);
+    if (ret) {
+        ERROR(temp, "rdma migration: error allocating qp!");
+        goto err_rdma_source_init;
+    }
+
+    ret = qemu_rdma_init_ram_blocks(rdma);
+    if (ret) {
+        ERROR(temp, "rdma migration: error initializing ram blocks!");
+        goto err_rdma_source_init;
+    }
+
+    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
+        ret = qemu_rdma_reg_control(rdma, idx);
+        if (ret) {
+            ERROR(temp, "rdma migration: error registering %d control!",
+                                                            idx);
+            goto err_rdma_source_init;
+        }
+    }
+
+    return 0;
+
+err_rdma_source_init:
+    error_propagate(errp, local_err);
+    qemu_rdma_cleanup(rdma);
+    return -1;
+}
+
+static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
+{
+    RDMACapabilities cap = {
+                                .version = RDMA_CONTROL_VERSION_CURRENT,
+                                .flags = 0,
+                           };
+    struct rdma_conn_param conn_param = { .initiator_depth = 2,
+                                          .retry_count = 5,
+                                          .private_data = &cap,
+                                          .private_data_len = sizeof(cap),
+                                        };
+    struct rdma_cm_event *cm_event;
+    int ret;
+
+    /*
+     * Only negotiate the capability with destination if the user
+     * on the source first requested the capability.
+     */
+    if (rdma->pin_all) {
+        DPRINTF("Server pin-all memory requested.\n");
+        cap.flags |= RDMA_CAPABILITY_PIN_ALL;
+    }
+
+    caps_to_network(&cap);
+
+    ret = rdma_connect(rdma->cm_id, &conn_param);
+    if (ret) {
+        perror("rdma_connect");
+        ERROR(errp, "connecting to destination!");
+        rdma_destroy_id(rdma->cm_id);
+        rdma->cm_id = NULL;
+        goto err_rdma_source_connect;
+    }
+
+    ret = rdma_get_cm_event(rdma->channel, &cm_event);
+    if (ret) {
+        perror("rdma_get_cm_event after rdma_connect");
+        ERROR(errp, "connecting to destination!");
+        rdma_destroy_id(rdma->cm_id);
+        rdma->cm_id = NULL;
+        goto err_rdma_source_connect;
+    }
+
+    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
+        perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
+        ERROR(errp, "connecting to destination!");
+        rdma_ack_cm_event(cm_event);
+        rdma_destroy_id(rdma->cm_id);
+        rdma->cm_id = NULL;
+        goto err_rdma_source_connect;
+    }
+    rdma->connected = true;
+
+    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
+    network_to_caps(&cap);
+
+    /*
+     * Verify that the *requested* capabilities are supported by the destination
+     * and disable them otherwise.
+     */
+    if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
+        ERROR(errp, "Server cannot support pinning all memory. "
+                        "Will register memory dynamically.");
+        rdma->pin_all = false;
+    }
+
+    DPRINTF("Pin all memory: %s\n", rdma->pin_all ? "enabled" : "disabled");
+
+    rdma_ack_cm_event(cm_event);
+
+    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
+    if (ret) {
+        ERROR(errp, "posting second control recv!");
+        goto err_rdma_source_connect;
+    }
+
+    rdma->control_ready_expected = 1;
+    rdma->nb_sent = 0;
+    return 0;
+
+err_rdma_source_connect:
+    qemu_rdma_cleanup(rdma);
+    return -1;
+}
+
+static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
+{
+    int ret = -EINVAL, idx;
+    struct rdma_cm_id *listen_id;
+    char ip[40] = "unknown";
+    struct rdma_addrinfo *res;
+    char port_str[16];
+
+    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
+        rdma->wr_data[idx].control_len = 0;
+        rdma->wr_data[idx].control_curr = NULL;
+    }
+
+    if (rdma->host == NULL) {
+        ERROR(errp, "RDMA host is not set!");
+        rdma->error_state = -EINVAL;
+        return -1;
+    }
+    /* create CM channel */
+    rdma->channel = rdma_create_event_channel();
+    if (!rdma->channel) {
+        ERROR(errp, "could not create rdma event channel");
+        rdma->error_state = -EINVAL;
+        return -1;
+    }
+
+    /* create CM id */
+    ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
+    if (ret) {
+        ERROR(errp, "could not create cm_id!");
+        goto err_dest_init_create_listen_id;
+    }
+
+    snprintf(port_str, 16, "%d", rdma->port);
+    port_str[15] = '\0';
+
+    if (rdma->host && strcmp("", rdma->host)) {
+        struct rdma_addrinfo *e;
+
+        ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
+        if (ret < 0) {
+            ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
+            goto err_dest_init_bind_addr;
+        }
+
+        for (e = res; e != NULL; e = e->ai_next) {
+            inet_ntop(e->ai_family,
+                &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
+            DPRINTF("Trying %s => %s\n", rdma->host, ip);
+            ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
+            if (!ret) {
+                if (e->ai_family == AF_INET6) {
+                    ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs);
+                    if (ret) {
+                        continue;
+                    }
+                }
+                goto listen;
+            }
+        }
+
+        ERROR(errp, "Error: could not rdma_bind_addr!");
+        goto err_dest_init_bind_addr;
+    } else {
+        ERROR(errp, "migration host and port not specified!");
+        ret = -EINVAL;
+        goto err_dest_init_bind_addr;
+    }
+listen:
+
+    rdma->listen_id = listen_id;
+    qemu_rdma_dump_gid("dest_init", listen_id);
+    return 0;
+
+err_dest_init_bind_addr:
+    rdma_destroy_id(listen_id);
+err_dest_init_create_listen_id:
+    rdma_destroy_event_channel(rdma->channel);
+    rdma->channel = NULL;
+    rdma->error_state = ret;
+    return ret;
+}
+
+static void *qemu_rdma_data_init(const char *host_port, Error **errp)
+{
+    RDMAContext *rdma = NULL;
+    InetSocketAddress *addr;
+
+    if (host_port) {
+        rdma = g_malloc0(sizeof(RDMAContext));
+        rdma->current_index = -1;
+        rdma->current_chunk = -1;
+
+        addr = inet_parse(host_port, NULL);
+        if (addr != NULL) {
+            rdma->port = atoi(addr->port);
+            rdma->host = g_strdup(addr->host);
+        } else {
+            ERROR(errp, "bad RDMA migration address '%s'", host_port);
+            g_free(rdma);
+            rdma = NULL;
+        }
+
+        qapi_free_InetSocketAddress(addr);
+    }
+
+    return rdma;
+}
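+
+/*
+ * For example (illustrative values): qemu_rdma_data_init("1.2.3.4:4444", errp)
+ * returns an RDMAContext with rdma->host = "1.2.3.4" and rdma->port = 4444.
+ * IPv6 literals use the usual bracket syntax, e.g. "[::1]:4444".
+ */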
+
+/*
+ * QEMUFile interface to the control channel.
+ * SEND messages for control only.
+ * VM's ram is handled with regular RDMA messages.
+ */
+static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
+                                int64_t pos, int size)
+{
+    QEMUFileRDMA *r = opaque;
+    QEMUFile *f = r->file;
+    RDMAContext *rdma = r->rdma;
+    size_t remaining = size;
+    uint8_t *data = (void *) buf;
+    int ret;
+
+    CHECK_ERROR_STATE();
+
+    /*
+     * Push out any writes that
+     * have been queued up for the VM's RAM.
+     */
+    ret = qemu_rdma_write_flush(f, rdma);
+    if (ret < 0) {
+        rdma->error_state = ret;
+        return ret;
+    }
+
+    while (remaining) {
+        RDMAControlHeader head;
+
+        r->len = MIN(remaining, RDMA_SEND_INCREMENT);
+        remaining -= r->len;
+
+        head.len = r->len;
+        head.type = RDMA_CONTROL_QEMU_FILE;
+
+        ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
+
+        if (ret < 0) {
+            rdma->error_state = ret;
+            return ret;
+        }
+
+        data += r->len;
+    }
+
+    return size;
+}
+
+static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
+                             int size, int idx)
+{
+    size_t len = 0;
+
+    if (rdma->wr_data[idx].control_len) {
+        DDDPRINTF("RDMA %" PRId64 " of %d bytes already in buffer\n",
+                    rdma->wr_data[idx].control_len, size);
+
+        len = MIN(size, rdma->wr_data[idx].control_len);
+        memcpy(buf, rdma->wr_data[idx].control_curr, len);
+        rdma->wr_data[idx].control_curr += len;
+        rdma->wr_data[idx].control_len -= len;
+    }
+
+    return len;
+}
+
+/*
+ * QEMUFile interface to the control channel.
+ * RDMA links don't use bytestreams, so we have to
+ * return bytes to QEMUFile opportunistically.
+ */
+static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
+                                int64_t pos, int size)
+{
+    QEMUFileRDMA *r = opaque;
+    RDMAContext *rdma = r->rdma;
+    RDMAControlHeader head;
+    int ret = 0;
+
+    CHECK_ERROR_STATE();
+
+    /*
+     * First, we hold on to the last SEND message we
+     * were given and dish out the bytes until we run
+     * out of bytes.
+     */
+    r->len = qemu_rdma_fill(r->rdma, buf, size, 0);
+    if (r->len) {
+        return r->len;
+    }
+
+    /*
+     * Once we run out, we block and wait for another
+     * SEND message to arrive.
+     */
+    ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);
+
+    if (ret < 0) {
+        rdma->error_state = ret;
+        return ret;
+    }
+
+    /*
+     * SEND was received with new bytes, now try again.
+     */
+    return qemu_rdma_fill(r->rdma, buf, size, 0);
+}
+
+/*
+ * Block until all the outstanding chunks have been delivered by the hardware.
+ */
+static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
+{
+    int ret;
+
+    if (qemu_rdma_write_flush(f, rdma) < 0) {
+        return -EIO;
+    }
+
+    while (rdma->nb_sent) {
+        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
+        if (ret < 0) {
+            fprintf(stderr, "rdma migration: complete polling error!\n");
+            return -EIO;
+        }
+    }
+
+    qemu_rdma_unregister_waiting(rdma);
+
+    return 0;
+}
+
+static int qemu_rdma_close(void *opaque)
+{
+    QEMUFileRDMA *r = opaque;
+
+    DPRINTF("Shutting down connection.\n");
+    if (r->rdma) {
+        qemu_rdma_cleanup(r->rdma);
+        g_free(r->rdma);
+    }
+    g_free(r);
+    return 0;
+}
+
+/*
+ * Parameters:
+ *    @offset == 0 :
+ *        This means that 'block_offset' is a full virtual address that does not
+ *        belong to a RAMBlock of the virtual machine and instead
+ *        represents a private malloc'd memory area that the caller wishes to
+ *        transfer.
+ *
+ *    @offset != 0 :
+ *        Offset is an offset to be added to block_offset and used
+ *        to also lookup the corresponding RAMBlock.
+ *
+ *    @size > 0 :
+ *        Initiate a transfer of this size.
+ *
+ *    @size == 0 :
+ *        A 'hint' or 'advice' that we wish to speculatively
+ *        and asynchronously unregister this memory. In this case, there is no
+ *        guarantee that the unregister will actually happen, for example,
+ *        if the memory is being actively transmitted. Additionally, the memory
+ *        may be re-registered at any future time if a write within the same
+ *        chunk was requested again, even if you attempted to unregister it
+ *        here.
+ *
+ *    @size < 0 : TODO, not yet supported
+ *        Unregister the memory NOW. This means that the caller does not
+ *        expect there to be any future RDMA transfers and we just want to clean
+ *        things up. This is used in case the upper layer owns the memory and
+ *        cannot wait for qemu_fclose() to occur.
+ *
+ *    @bytes_sent : User-specified pointer to indicate how many bytes were
+ *                  sent. Usually, this will not be more than a few bytes of
+ *                  the protocol because most transfers are sent asynchronously.
+ */
+static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
+                                  ram_addr_t block_offset, ram_addr_t offset,
+                                  size_t size, int *bytes_sent)
+{
+    QEMUFileRDMA *rfile = opaque;
+    RDMAContext *rdma = rfile->rdma;
+    int ret;
+
+    CHECK_ERROR_STATE();
+
+    qemu_fflush(f);
+
+    if (size > 0) {
+        /*
+         * Add this page to the current 'chunk'. If the chunk
+         * is full, or the page doesn't belong to the current chunk,
+         * an actual RDMA write will occur and a new chunk will be formed.
+         */
+        ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
+        if (ret < 0) {
+            fprintf(stderr, "rdma migration: write error! %d\n", ret);
+            goto err;
+        }
+
+        /*
+         * We always return 1 byte because the RDMA
+         * protocol is completely asynchronous. We do not yet know
+         * whether an identified chunk is zero or not because we're
+         * waiting for other pages to potentially be merged with
+         * the current chunk. So, we have to call qemu_update_position()
+         * later on when the actual write occurs.
+         */
+        if (bytes_sent) {
+            *bytes_sent = 1;
+        }
+    } else {
+        uint64_t index, chunk;
+
+        /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
+        if (size < 0) {
+            ret = qemu_rdma_drain_cq(f, rdma);
+            if (ret < 0) {
+                fprintf(stderr, "rdma: failed to synchronously drain"
+                                " completion queue before unregistration.\n");
+                goto err;
+            }
+        }
+        */
+
+        ret = qemu_rdma_search_ram_block(rdma, block_offset,
+                                         offset, size, &index, &chunk);
+
+        if (ret) {
+            fprintf(stderr, "ram block search failed\n");
+            goto err;
+        }
+
+        qemu_rdma_signal_unregister(rdma, index, chunk, 0);
+
+        /*
+         * TODO: Synchronous, guaranteed unregistration (should not occur during
+         * fast-path). Otherwise, unregisters will be processed on the next call to
+         * qemu_rdma_drain_cq()
+        if (size < 0) {
+            qemu_rdma_unregister_waiting(rdma);
+        }
+        */
+    }
+
+    /*
+     * Drain the Completion Queue if possible, but do not block,
+     * just poll.
+     *
+     * If nothing to poll, the end of the iteration will do this
+     * again to make sure we don't overflow the request queue.
+     */
+    while (1) {
+        uint64_t wr_id, wr_id_in;
+        int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
+        if (ret < 0) {
+            fprintf(stderr, "rdma migration: polling error! %d\n", ret);
+            goto err;
+        }
+
+        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
+
+        if (wr_id == RDMA_WRID_NONE) {
+            break;
+        }
+    }
+
+    return RAM_SAVE_CONTROL_DELAYED;
+err:
+    rdma->error_state = ret;
+    return ret;
+}
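+
+/*
+ * Illustration only (not part of this file): a hypothetical caller
+ * exercising the three-way contract documented above. Names such as
+ * 'f', 'opaque' and TARGET_PAGE_SIZE stand in for whatever the real
+ * RAM save path supplies:
+ *
+ *     int bytes_sent = 0;
+ *     size_t ret = qemu_rdma_save_page(f, opaque, block_offset, offset,
+ *                                      TARGET_PAGE_SIZE, &bytes_sent);
+ *     if (ret == RAM_SAVE_CONTROL_DELAYED) {
+ *         // sent asynchronously; accounting is fixed up later via
+ *         // qemu_update_position() when the write actually completes
+ *     }
+ *     qemu_rdma_save_page(f, opaque, block_offset, offset, 0, NULL);
+ *     // size == 0: speculative unregistration hint, may be ignored
+ */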
+
+static int qemu_rdma_accept(RDMAContext *rdma)
+{
+    RDMACapabilities cap;
+    struct rdma_conn_param conn_param = {
+                                            .responder_resources = 2,
+                                            .private_data = &cap,
+                                            .private_data_len = sizeof(cap),
+                                         };
+    struct rdma_cm_event *cm_event;
+    struct ibv_context *verbs;
+    int ret = -EINVAL;
+    int idx;
+
+    ret = rdma_get_cm_event(rdma->channel, &cm_event);
+    if (ret) {
+        goto err_rdma_dest_wait;
+    }
+
+    if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+        rdma_ack_cm_event(cm_event);
+        goto err_rdma_dest_wait;
+    }
+
+    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
+
+    network_to_caps(&cap);
+
+    if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
+            fprintf(stderr, "Unknown source RDMA version: %d, bailing...\n",
+                            cap.version);
+            rdma_ack_cm_event(cm_event);
+            goto err_rdma_dest_wait;
+    }
+
+    /*
+     * Respond with only the capabilities this version of QEMU knows about.
+     */
+    cap.flags &= known_capabilities;
+
+    /*
+     * Enable the ones that we do know about.
+     * Add other checks here as new ones are introduced.
+     */
+    if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
+        rdma->pin_all = true;
+    }
+
+    rdma->cm_id = cm_event->id;
+    verbs = cm_event->id->verbs;
+
+    rdma_ack_cm_event(cm_event);
+
+    DPRINTF("Memory pin all: %s\n", rdma->pin_all ? "enabled" : "disabled");
+
+    caps_to_network(&cap);
+
+    DPRINTF("verbs context after listen: %p\n", verbs);
+
+    if (!rdma->verbs) {
+        rdma->verbs = verbs;
+    } else if (rdma->verbs != verbs) {
+            fprintf(stderr, "ibv context not matching %p, %p!\n",
+                    rdma->verbs, verbs);
+            goto err_rdma_dest_wait;
+    }
+
+    qemu_rdma_dump_id("dest_init", verbs);
+
+    ret = qemu_rdma_alloc_pd_cq(rdma);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error allocating pd and cq!\n");
+        goto err_rdma_dest_wait;
+    }
+
+    ret = qemu_rdma_alloc_qp(rdma);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error allocating qp!\n");
+        goto err_rdma_dest_wait;
+    }
+
+    ret = qemu_rdma_init_ram_blocks(rdma);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error initializing ram blocks!\n");
+        goto err_rdma_dest_wait;
+    }
+
+    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
+        ret = qemu_rdma_reg_control(rdma, idx);
+        if (ret) {
+            fprintf(stderr, "rdma: error registering %d control!\n", idx);
+            goto err_rdma_dest_wait;
+        }
+    }
+
+    qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL);
+
+    ret = rdma_accept(rdma->cm_id, &conn_param);
+    if (ret) {
+        fprintf(stderr, "rdma_accept returns %d!\n", ret);
+        goto err_rdma_dest_wait;
+    }
+
+    ret = rdma_get_cm_event(rdma->channel, &cm_event);
+    if (ret) {
+        fprintf(stderr, "rdma_accept get_cm_event failed %d!\n", ret);
+        goto err_rdma_dest_wait;
+    }
+
+    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
+        fprintf(stderr, "rdma_accept not event established!\n");
+        rdma_ack_cm_event(cm_event);
+        goto err_rdma_dest_wait;
+    }
+
+    rdma_ack_cm_event(cm_event);
+    rdma->connected = true;
+
+    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
+    if (ret) {
+        fprintf(stderr, "rdma migration: error posting second control recv!\n");
+        goto err_rdma_dest_wait;
+    }
+
+    qemu_rdma_dump_gid("dest_connect", rdma->cm_id);
+
+    return 0;
+
+err_rdma_dest_wait:
+    rdma->error_state = ret;
+    qemu_rdma_cleanup(rdma);
+    return ret;
+}
+
+/*
+ * During each iteration of the migration, we listen for instructions
+ * by the source VM to perform dynamic page registrations before they
+ * can perform RDMA operations.
+ *
+ * We respond with the 'rkey'.
+ *
+ * Keep doing this until the source tells us to stop.
+ */
+static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
+                                         uint64_t flags)
+{
+    RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
+                               .type = RDMA_CONTROL_REGISTER_RESULT,
+                               .repeat = 0,
+                             };
+    RDMAControlHeader unreg_resp = { .len = 0,
+                               .type = RDMA_CONTROL_UNREGISTER_FINISHED,
+                               .repeat = 0,
+                             };
+    RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
+                                 .repeat = 1 };
+    QEMUFileRDMA *rfile = opaque;
+    RDMAContext *rdma = rfile->rdma;
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+    RDMAControlHeader head;
+    RDMARegister *reg, *registers;
+    RDMACompress *comp;
+    RDMARegisterResult *reg_result;
+    static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
+    RDMALocalBlock *block;
+    void *host_addr;
+    int ret = 0;
+    int idx = 0;
+    int count = 0;
+    int i = 0;
+
+    CHECK_ERROR_STATE();
+
+    do {
+        DDDPRINTF("Waiting for next request %" PRIu64 "...\n", flags);
+
+        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
+
+        if (ret < 0) {
+            break;
+        }
+
+        if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
+            fprintf(stderr, "rdma: Too many requests in this message (%d)."
+                            "Bailing.\n", head.repeat);
+            ret = -EIO;
+            break;
+        }
+
+        switch (head.type) {
+        case RDMA_CONTROL_COMPRESS:
+            comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
+            network_to_compress(comp);
+
+            DDPRINTF("Zapping zero chunk: %" PRId64
+                    " bytes, index %d, offset %" PRId64 "\n",
+                    comp->length, comp->block_idx, comp->offset);
+            block = &(rdma->local_ram_blocks.block[comp->block_idx]);
+
+            host_addr = block->local_host_addr +
+                            (comp->offset - block->offset);
+
+            ram_handle_compressed(host_addr, comp->value, comp->length);
+            break;
+
+        case RDMA_CONTROL_REGISTER_FINISHED:
+            DDDPRINTF("Current registrations complete.\n");
+            goto out;
+
+        case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
+            DPRINTF("Initial setup info requested.\n");
+
+            if (rdma->pin_all) {
+                ret = qemu_rdma_reg_whole_ram_blocks(rdma);
+                if (ret) {
+                    fprintf(stderr, "rdma migration: error dest "
+                                    "registering ram blocks!\n");
+                    goto out;
+                }
+            }
+
+            /*
+             * Dest uses this to prepare to transmit the RAMBlock descriptions
+             * to the source VM after connection setup.
+             * Both sides use the "remote" structure to communicate and update
+             * their "local" descriptions with what was sent.
+             */
+            for (i = 0; i < local->nb_blocks; i++) {
+                rdma->block[i].remote_host_addr =
+                    (uint64_t)(local->block[i].local_host_addr);
+
+                if (rdma->pin_all) {
+                    rdma->block[i].remote_rkey = local->block[i].mr->rkey;
+                }
+
+                rdma->block[i].offset = local->block[i].offset;
+                rdma->block[i].length = local->block[i].length;
+
+                remote_block_to_network(&rdma->block[i]);
+            }
+
+            blocks.len = rdma->local_ram_blocks.nb_blocks
+                                                * sizeof(RDMARemoteBlock);
+
+            ret = qemu_rdma_post_send_control(rdma,
+                                        (uint8_t *) rdma->block, &blocks);
+
+            if (ret < 0) {
+                fprintf(stderr, "rdma migration: error sending remote info!\n");
+                goto out;
+            }
+
+            break;
+        case RDMA_CONTROL_REGISTER_REQUEST:
+            DDPRINTF("There are %d registration requests\n", head.repeat);
+
+            reg_resp.repeat = head.repeat;
+            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
+
+            for (count = 0; count < head.repeat; count++) {
+                uint64_t chunk;
+                uint8_t *chunk_start, *chunk_end;
+
+                reg = &registers[count];
+                network_to_register(reg);
+
+                reg_result = &results[count];
+
+                DDPRINTF("Registration request (%d): index %d, current_addr %"
+                         PRIu64 " chunks: %" PRIu64 "\n", count,
+                         reg->current_index, reg->key.current_addr, reg->chunks);
+
+                block = &(rdma->local_ram_blocks.block[reg->current_index]);
+                if (block->is_ram_block) {
+                    host_addr = (block->local_host_addr +
+                                (reg->key.current_addr - block->offset));
+                    chunk = ram_chunk_index(block->local_host_addr,
+                                            (uint8_t *) host_addr);
+                } else {
+                    chunk = reg->key.chunk;
+                    host_addr = block->local_host_addr +
+                        (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
+                }
+                chunk_start = ram_chunk_start(block, chunk);
+                chunk_end = ram_chunk_end(block, chunk + reg->chunks);
+                if (qemu_rdma_register_and_get_keys(rdma, block,
+                            (uint8_t *)host_addr, NULL, &reg_result->rkey,
+                            chunk, chunk_start, chunk_end)) {
+                    fprintf(stderr, "cannot get rkey!\n");
+                    ret = -EINVAL;
+                    goto out;
+                }
+
+                reg_result->host_addr = (uint64_t) block->local_host_addr;
+
+                DDPRINTF("Registered rkey for this request: %x\n",
+                                reg_result->rkey);
+
+                result_to_network(reg_result);
+            }
+
+            ret = qemu_rdma_post_send_control(rdma,
+                            (uint8_t *) results, &reg_resp);
+
+            if (ret < 0) {
+                fprintf(stderr, "Failed to send control buffer!\n");
+                goto out;
+            }
+            break;
+        case RDMA_CONTROL_UNREGISTER_REQUEST:
+            DDPRINTF("There are %d unregistration requests\n", head.repeat);
+            unreg_resp.repeat = head.repeat;
+            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
+
+            for (count = 0; count < head.repeat; count++) {
+                reg = &registers[count];
+                network_to_register(reg);
+
+                DDPRINTF("Unregistration request (%d): "
+                         " index %d, chunk %" PRIu64 "\n",
+                         count, reg->current_index, reg->key.chunk);
+
+                block = &(rdma->local_ram_blocks.block[reg->current_index]);
+
+                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
+                block->pmr[reg->key.chunk] = NULL;
+
+                if (ret != 0) {
+                    perror("rdma unregistration chunk failed");
+                    ret = -ret;
+                    goto out;
+                }
+
+                rdma->total_registrations--;
+
+                DDPRINTF("Unregistered chunk %" PRIu64 " successfully.\n",
+                            reg->key.chunk);
+            }
+
+            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);
+
+            if (ret < 0) {
+                fprintf(stderr, "Failed to send control buffer!\n");
+                goto out;
+            }
+            break;
+        case RDMA_CONTROL_REGISTER_RESULT:
+            fprintf(stderr, "Invalid RESULT message at dest.\n");
+            ret = -EIO;
+            goto out;
+        default:
+            fprintf(stderr, "Unknown control message %s\n",
+                                control_desc[head.type]);
+            ret = -EIO;
+            goto out;
+        }
+    } while (1);
+out:
+    if (ret < 0) {
+        rdma->error_state = ret;
+    }
+    return ret;
+}
+
+static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
+                                        uint64_t flags)
+{
+    QEMUFileRDMA *rfile = opaque;
+    RDMAContext *rdma = rfile->rdma;
+
+    CHECK_ERROR_STATE();
+
+    DDDPRINTF("start section: %" PRIu64 "\n", flags);
+    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
+    qemu_fflush(f);
+
+    return 0;
+}
+
+/*
+ * Inform dest that dynamic registrations are done for now.
+ * First, flush writes, if any.
+ */
+static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
+                                       uint64_t flags)
+{
+    Error *local_err = NULL, **errp = &local_err;
+    QEMUFileRDMA *rfile = opaque;
+    RDMAContext *rdma = rfile->rdma;
+    RDMAControlHeader head = { .len = 0, .repeat = 1 };
+    int ret = 0;
+
+    CHECK_ERROR_STATE();
+
+    qemu_fflush(f);
+    ret = qemu_rdma_drain_cq(f, rdma);
+
+    if (ret < 0) {
+        goto err;
+    }
+
+    if (flags == RAM_CONTROL_SETUP) {
+        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
+        RDMALocalBlocks *local = &rdma->local_ram_blocks;
+        int reg_result_idx, i, j, nb_remote_blocks;
+
+        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
+        DPRINTF("Sending registration setup for ram blocks...\n");
+
+        /*
+         * Make sure that we parallelize the pinning on both sides.
+         * For very large guests, doing this serially takes a really
+         * long time, so we have to 'interleave' the pinning locally
+         * with the control messages by performing the pinning on this
+         * side before we receive the control response from the other
+         * side that the pinning has completed.
+         */
+        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
+                    &reg_result_idx, rdma->pin_all ?
+                    qemu_rdma_reg_whole_ram_blocks : NULL);
+        if (ret < 0) {
+            ERROR(errp, "receiving remote info!");
+            return ret;
+        }
+
+        nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock);
+
+        /*
+         * The protocol uses two different sets of rkeys (mutually exclusive):
+         * 1. One key to represent the virtual address of the entire ram block.
+         *    (dynamic chunk registration disabled - pin everything with one rkey.)
+         * 2. One to represent individual chunks within a ram block.
+         *    (dynamic chunk registration enabled - pin individual chunks.)
+         *
+         * Once the capability is successfully negotiated, the destination transmits
+         * the keys to use (or sends them later) including the virtual addresses
+         * and then propagates the remote ram block descriptions to its local copy.
+         */
+
+        if (local->nb_blocks != nb_remote_blocks) {
+            ERROR(errp, "ram blocks mismatch #1! "
+                        "Your QEMU command line parameters are probably "
+                        "not identical on both the source and destination.");
+            return -EINVAL;
+        }
+
+        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
+        memcpy(rdma->block,
+            rdma->wr_data[reg_result_idx].control_curr, resp.len);
+        for (i = 0; i < nb_remote_blocks; i++) {
+            network_to_remote_block(&rdma->block[i]);
+
+            /* search local ram blocks */
+            for (j = 0; j < local->nb_blocks; j++) {
+                if (rdma->block[i].offset != local->block[j].offset) {
+                    continue;
+                }
+
+                if (rdma->block[i].length != local->block[j].length) {
+                    ERROR(errp, "ram blocks mismatch #2! "
+                        "Your QEMU command line parameters are probably "
+                        "not identical on both the source and destination.");
+                    return -EINVAL;
+                }
+                local->block[j].remote_host_addr =
+                        rdma->block[i].remote_host_addr;
+                local->block[j].remote_rkey = rdma->block[i].remote_rkey;
+                break;
+            }
+
+            if (j >= local->nb_blocks) {
+                ERROR(errp, "ram blocks mismatch #3! "
+                        "Your QEMU command line parameters are probably "
+                        "not identical on both the source and destination.");
+                return -EINVAL;
+            }
+        }
+    }
+
+    DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags);
+
+    head.type = RDMA_CONTROL_REGISTER_FINISHED;
+    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
+
+    if (ret < 0) {
+        goto err;
+    }
+
+    return 0;
+err:
+    rdma->error_state = ret;
+    return ret;
+}
+
+static int qemu_rdma_get_fd(void *opaque)
+{
+    QEMUFileRDMA *rfile = opaque;
+    RDMAContext *rdma = rfile->rdma;
+
+    return rdma->comp_channel->fd;
+}
+
+const QEMUFileOps rdma_read_ops = {
+    .get_buffer    = qemu_rdma_get_buffer,
+    .get_fd        = qemu_rdma_get_fd,
+    .close         = qemu_rdma_close,
+    .hook_ram_load = qemu_rdma_registration_handle,
+};
+
+const QEMUFileOps rdma_write_ops = {
+    .put_buffer         = qemu_rdma_put_buffer,
+    .close              = qemu_rdma_close,
+    .before_ram_iterate = qemu_rdma_registration_start,
+    .after_ram_iterate  = qemu_rdma_registration_stop,
+    .save_page          = qemu_rdma_save_page,
+};
+
+static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
+{
+    QEMUFileRDMA *r = g_malloc0(sizeof(QEMUFileRDMA));
+
+    if (qemu_file_mode_is_not_valid(mode)) {
+        return NULL;
+    }
+
+    r->rdma = rdma;
+
+    if (mode[0] == 'w') {
+        r->file = qemu_fopen_ops(r, &rdma_write_ops);
+    } else {
+        r->file = qemu_fopen_ops(r, &rdma_read_ops);
+    }
+
+    return r->file;
+}
+
+static void rdma_accept_incoming_migration(void *opaque)
+{
+    RDMAContext *rdma = opaque;
+    int ret;
+    QEMUFile *f;
+    Error *local_err = NULL, **errp = &local_err;
+
+    DPRINTF("Accepting rdma connection...\n");
+    ret = qemu_rdma_accept(rdma);
+
+    if (ret) {
+        ERROR(errp, "RDMA Migration initialization failed!");
+        return;
+    }
+
+    DPRINTF("Accepted migration\n");
+
+    f = qemu_fopen_rdma(rdma, "rb");
+    if (f == NULL) {
+        ERROR(errp, "could not qemu_fopen_rdma!");
+        qemu_rdma_cleanup(rdma);
+        return;
+    }
+
+    rdma->migration_started_on_destination = 1;
+    process_incoming_migration(f);
+}
+
+void rdma_start_incoming_migration(const char *host_port, Error **errp)
+{
+    int ret;
+    RDMAContext *rdma;
+    Error *local_err = NULL;
+
+    DPRINTF("Starting RDMA-based incoming migration\n");
+    rdma = qemu_rdma_data_init(host_port, &local_err);
+
+    if (rdma == NULL) {
+        goto err;
+    }
+
+    ret = qemu_rdma_dest_init(rdma, &local_err);
+
+    if (ret) {
+        goto err;
+    }
+
+    DPRINTF("qemu_rdma_dest_init success\n");
+
+    ret = rdma_listen(rdma->listen_id, 5);
+
+    if (ret) {
+        ERROR(errp, "listening on socket!");
+        goto err;
+    }
+
+    DPRINTF("rdma_listen success\n");
+
+    qemu_set_fd_handler2(rdma->channel->fd, NULL,
+                         rdma_accept_incoming_migration, NULL,
+                            (void *)(intptr_t) rdma);
+    return;
+err:
+    error_propagate(errp, local_err);
+    g_free(rdma);
+}
+
+void rdma_start_outgoing_migration(void *opaque,
+                            const char *host_port, Error **errp)
+{
+    MigrationState *s = opaque;
+    Error *local_err = NULL, **temp = &local_err;
+    RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
+    int ret = 0;
+
+    if (rdma == NULL) {
+        ERROR(temp, "Failed to initialize RDMA data structures! %d", ret);
+        goto err;
+    }
+
+    ret = qemu_rdma_source_init(rdma, &local_err,
+        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
+
+    if (ret) {
+        goto err;
+    }
+
+    DPRINTF("qemu_rdma_source_init success\n");
+    ret = qemu_rdma_connect(rdma, &local_err);
+
+    if (ret) {
+        goto err;
+    }
+
+    DPRINTF("qemu_rdma_source_connect success\n");
+
+    s->file = qemu_fopen_rdma(rdma, "wb");
+    migrate_fd_connect(s);
+    return;
+err:
+    error_propagate(errp, local_err);
+    g_free(rdma);
+    migrate_fd_error(s);
+}
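
The two QEMUFileOps tables above are the entire contract between this
transport and the generic migration stream; QEMUFile never touches a
socket or verbs object directly, only these callbacks. A minimal sketch
of the same shape for a hypothetical transport over a plain file
descriptor (only QEMUFileOps/qemu_fopen_ops are real QEMU API here;
everything named plainfd_* is made up):

    #include <errno.h>
    #include <unistd.h>
    #include "qemu-common.h"
    #include "migration/qemu-file.h"

    typedef struct QEMUFilePlainFd {
        int fd;
        QEMUFile *file;
    } QEMUFilePlainFd;

    static int plainfd_put_buffer(void *opaque, const uint8_t *buf,
                                  int64_t pos, int size)
    {
        QEMUFilePlainFd *s = opaque;
        int done = 0;

        /* stream transports may ignore 'pos', as the RDMA ops do */
        while (done < size) {
            ssize_t len = write(s->fd, buf + done, size - done);
            if (len < 0 && errno == EINTR) {
                continue;
            }
            if (len < 0) {
                return -errno;
            }
            done += len;
        }
        return size;
    }

    static int plainfd_close(void *opaque)
    {
        QEMUFilePlainFd *s = opaque;

        close(s->fd);
        g_free(s);
        return 0;
    }

    static const QEMUFileOps plainfd_write_ops = {
        .put_buffer = plainfd_put_buffer,
        .close      = plainfd_close,
    };

    static QEMUFile *qemu_fopen_plainfd(int fd)
    {
        QEMUFilePlainFd *s = g_malloc0(sizeof(*s));

        s->fd = fd;
        s->file = qemu_fopen_ops(s, &plainfd_write_ops);
        return s->file;
    }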
diff --git a/migration/migration-tcp.c b/migration/migration-tcp.c
new file mode 100644 (file)
index 0000000..91c9cf3
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * QEMU live migration
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ *  Anthony Liguori   <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include <string.h>
+
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "qemu/sockets.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "block/block.h"
+#include "qemu/main-loop.h"
+
+//#define DEBUG_MIGRATION_TCP
+
+#ifdef DEBUG_MIGRATION_TCP
+#define DPRINTF(fmt, ...) \
+    do { printf("migration-tcp: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+static void tcp_wait_for_connect(int fd, Error *err, void *opaque)
+{
+    MigrationState *s = opaque;
+
+    if (fd < 0) {
+        DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
+        s->file = NULL;
+        migrate_fd_error(s);
+    } else {
+        DPRINTF("migrate connect success\n");
+        s->file = qemu_fopen_socket(fd, "wb");
+        migrate_fd_connect(s);
+    }
+}
+
+void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp)
+{
+    inet_nonblocking_connect(host_port, tcp_wait_for_connect, s, errp);
+}
+
+static void tcp_accept_incoming_migration(void *opaque)
+{
+    struct sockaddr_in addr;
+    socklen_t addrlen = sizeof(addr);
+    int s = (intptr_t)opaque;
+    QEMUFile *f;
+    int c, err;
+
+    do {
+        c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
+        err = socket_error();
+    } while (c < 0 && err == EINTR);
+    qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
+    closesocket(s);
+
+    DPRINTF("accepted migration\n");
+
+    if (c < 0) {
+        error_report("could not accept migration connection (%s)",
+                     strerror(err));
+        return;
+    }
+
+    f = qemu_fopen_socket(c, "rb");
+    if (f == NULL) {
+        error_report("could not qemu_fopen socket");
+        goto out;
+    }
+
+    process_incoming_migration(f);
+    return;
+
+out:
+    closesocket(c);
+}
+
+void tcp_start_incoming_migration(const char *host_port, Error **errp)
+{
+    int s;
+
+    s = inet_listen(host_port, NULL, 256, SOCK_STREAM, 0, errp);
+    if (s < 0) {
+        return;
+    }
+
+    qemu_set_fd_handler2(s, NULL, tcp_accept_incoming_migration, NULL,
+                         (void *)(intptr_t)s);
+}
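
Both socket transports share the same accept idiom seen above: retry
accept(2) while it fails with EINTR, then unregister and close the
listening descriptor so exactly one migration connection is served.
Stripped of the QEMU wrappers, the core is just this (standalone POSIX
sketch, not QEMU code):

    #include <errno.h>
    #include <sys/socket.h>

    /* Accept one connection, retrying on signal interruption.
     * Returns the connected fd, or -1 with errno set on real errors. */
    static int accept_one(int listen_fd)
    {
        int c;

        do {
            c = accept(listen_fd, NULL, NULL);
        } while (c < 0 && errno == EINTR);
        return c;
    }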
diff --git a/migration/migration-unix.c b/migration/migration-unix.c
new file mode 100644 (file)
index 0000000..1cdadfb
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * QEMU live migration via Unix Domain Sockets
+ *
+ * Copyright Red Hat, Inc. 2009
+ *
+ * Authors:
+ *  Chris Lalancette <clalance@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include <string.h>
+
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "qemu/sockets.h"
+#include "qemu/main-loop.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "block/block.h"
+
+//#define DEBUG_MIGRATION_UNIX
+
+#ifdef DEBUG_MIGRATION_UNIX
+#define DPRINTF(fmt, ...) \
+    do { printf("migration-unix: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+static void unix_wait_for_connect(int fd, Error *err, void *opaque)
+{
+    MigrationState *s = opaque;
+
+    if (fd < 0) {
+        DPRINTF("migrate connect error: %s\n", error_get_pretty(err));
+        s->file = NULL;
+        migrate_fd_error(s);
+    } else {
+        DPRINTF("migrate connect success\n");
+        s->file = qemu_fopen_socket(fd, "wb");
+        migrate_fd_connect(s);
+    }
+}
+
+void unix_start_outgoing_migration(MigrationState *s, const char *path, Error **errp)
+{
+    unix_nonblocking_connect(path, unix_wait_for_connect, s, errp);
+}
+
+static void unix_accept_incoming_migration(void *opaque)
+{
+    struct sockaddr_un addr;
+    socklen_t addrlen = sizeof(addr);
+    int s = (intptr_t)opaque;
+    QEMUFile *f;
+    int c, err;
+
+    do {
+        c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen);
+        err = errno;
+    } while (c < 0 && err == EINTR);
+    qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
+    close(s);
+
+    DPRINTF("accepted migration\n");
+
+    if (c < 0) {
+        error_report("could not accept migration connection (%s)",
+                     strerror(err));
+        return;
+    }
+
+    f = qemu_fopen_socket(c, "rb");
+    if (f == NULL) {
+        error_report("could not qemu_fopen socket");
+        goto out;
+    }
+
+    process_incoming_migration(f);
+    return;
+
+out:
+    close(c);
+}
+
+void unix_start_incoming_migration(const char *path, Error **errp)
+{
+    int s;
+
+    s = unix_listen(path, NULL, 0, errp);
+    if (s < 0) {
+        return;
+    }
+
+    qemu_set_fd_handler2(s, NULL, unix_accept_incoming_migration, NULL,
+                         (void *)(intptr_t)s);
+}
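
The Unix transport differs from TCP only in the address family and the
listen/connect helpers. What unix_nonblocking_connect() ultimately
arranges is the usual sockaddr_un setup; a blocking simplification for
illustration (plain POSIX, not the QEMU helper):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    static int unix_connect_path(const char *path)
    {
        struct sockaddr_un addr;
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);

        if (fd < 0) {
            return -1;
        }
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }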
diff --git a/migration/migration.c b/migration/migration.c
new file mode 100644 (file)
index 0000000..c49a05a
--- /dev/null
@@ -0,0 +1,700 @@
+/*
+ * QEMU live migration
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ *  Anthony Liguori   <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu-common.h"
+#include "qemu/main-loop.h"
+#include "migration/migration.h"
+#include "monitor/monitor.h"
+#include "migration/qemu-file.h"
+#include "sysemu/sysemu.h"
+#include "block/block.h"
+#include "qemu/sockets.h"
+#include "migration/block.h"
+#include "qemu/thread.h"
+#include "qmp-commands.h"
+#include "trace.h"
+
+enum {
+    MIG_STATE_ERROR = -1,
+    MIG_STATE_NONE,
+    MIG_STATE_SETUP,
+    MIG_STATE_CANCELLING,
+    MIG_STATE_CANCELLED,
+    MIG_STATE_ACTIVE,
+    MIG_STATE_COMPLETED,
+};
+
+#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
+
+/* Amount of time to allocate to each "chunk" of bandwidth-throttled
+ * data. */
+#define BUFFER_DELAY     100
+#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
+
+/* Migration XBZRLE default cache size */
+#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
+
+static NotifierList migration_state_notifiers =
+    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
+
+/* When we add fault tolerance, we could have several
+   migrations at once.  For now we don't need to support
+   dynamic creation of migration state. */
+
+MigrationState *migrate_get_current(void)
+{
+    static MigrationState current_migration = {
+        .state = MIG_STATE_NONE,
+        .bandwidth_limit = MAX_THROTTLE,
+        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
+        .mbps = -1,
+    };
+
+    return &current_migration;
+}
+
+void qemu_start_incoming_migration(const char *uri, Error **errp)
+{
+    const char *p;
+
+    if (strstart(uri, "tcp:", &p))
+        tcp_start_incoming_migration(p, errp);
+#ifdef CONFIG_RDMA
+    else if (strstart(uri, "rdma:", &p))
+        rdma_start_incoming_migration(p, errp);
+#endif
+#if !defined(WIN32)
+    else if (strstart(uri, "exec:", &p))
+        exec_start_incoming_migration(p, errp);
+    else if (strstart(uri, "unix:", &p))
+        unix_start_incoming_migration(p, errp);
+    else if (strstart(uri, "fd:", &p))
+        fd_start_incoming_migration(p, errp);
+#endif
+    else {
+        error_setg(errp, "unknown migration protocol: %s", uri);
+    }
+}
+
+static void process_incoming_migration_co(void *opaque)
+{
+    QEMUFile *f = opaque;
+    Error *local_err = NULL;
+    int ret;
+
+    ret = qemu_loadvm_state(f);
+    qemu_fclose(f);
+    free_xbzrle_decoded_buf();
+    if (ret < 0) {
+        error_report("load of migration failed: %s", strerror(-ret));
+        exit(EXIT_FAILURE);
+    }
+    qemu_announce_self();
+
+    /* Make sure all file formats flush their mutable metadata */
+    bdrv_invalidate_cache_all(&local_err);
+    if (local_err) {
+        qerror_report_err(local_err);
+        error_free(local_err);
+        exit(EXIT_FAILURE);
+    }
+
+    if (autostart) {
+        vm_start();
+    } else {
+        runstate_set(RUN_STATE_PAUSED);
+    }
+}
+
+void process_incoming_migration(QEMUFile *f)
+{
+    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
+    int fd = qemu_get_fd(f);
+
+    assert(fd != -1);
+    qemu_set_nonblock(fd);
+    qemu_coroutine_enter(co, f);
+}
+
+/* Amount of nanoseconds we are willing to have the VM down for migration.
+ * Nanoseconds are used because that is the maximum resolution that
+ * get_clock() can achieve. It is an internal measure. All user-visible
+ * units must be in seconds. */
+static uint64_t max_downtime = 300000000;
+
+uint64_t migrate_max_downtime(void)
+{
+    return max_downtime;
+}
+
+MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
+{
+    MigrationCapabilityStatusList *head = NULL;
+    MigrationCapabilityStatusList *caps;
+    MigrationState *s = migrate_get_current();
+    int i;
+
+    caps = NULL; /* silence compiler warning */
+    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
+        if (head == NULL) {
+            head = g_malloc0(sizeof(*caps));
+            caps = head;
+        } else {
+            caps->next = g_malloc0(sizeof(*caps));
+            caps = caps->next;
+        }
+        caps->value =
+            g_malloc(sizeof(*caps->value));
+        caps->value->capability = i;
+        caps->value->state = s->enabled_capabilities[i];
+    }
+
+    return head;
+}
+
+static void get_xbzrle_cache_stats(MigrationInfo *info)
+{
+    if (migrate_use_xbzrle()) {
+        info->has_xbzrle_cache = true;
+        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
+        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
+        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
+        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
+        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
+        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
+        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
+    }
+}
+
+MigrationInfo *qmp_query_migrate(Error **errp)
+{
+    MigrationInfo *info = g_malloc0(sizeof(*info));
+    MigrationState *s = migrate_get_current();
+
+    switch (s->state) {
+    case MIG_STATE_NONE:
+        /* no migration has happened ever */
+        break;
+    case MIG_STATE_SETUP:
+        info->has_status = true;
+        info->status = g_strdup("setup");
+        info->has_total_time = false;
+        break;
+    case MIG_STATE_ACTIVE:
+    case MIG_STATE_CANCELLING:
+        info->has_status = true;
+        info->status = g_strdup("active");
+        info->has_total_time = true;
+        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
+            - s->total_time;
+        info->has_expected_downtime = true;
+        info->expected_downtime = s->expected_downtime;
+        info->has_setup_time = true;
+        info->setup_time = s->setup_time;
+
+        info->has_ram = true;
+        info->ram = g_malloc0(sizeof(*info->ram));
+        info->ram->transferred = ram_bytes_transferred();
+        info->ram->remaining = ram_bytes_remaining();
+        info->ram->total = ram_bytes_total();
+        info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
+        info->ram->normal = norm_mig_pages_transferred();
+        info->ram->normal_bytes = norm_mig_bytes_transferred();
+        info->ram->dirty_pages_rate = s->dirty_pages_rate;
+        info->ram->mbps = s->mbps;
+        info->ram->dirty_sync_count = s->dirty_sync_count;
+
+        if (blk_mig_active()) {
+            info->has_disk = true;
+            info->disk = g_malloc0(sizeof(*info->disk));
+            info->disk->transferred = blk_mig_bytes_transferred();
+            info->disk->remaining = blk_mig_bytes_remaining();
+            info->disk->total = blk_mig_bytes_total();
+        }
+
+        get_xbzrle_cache_stats(info);
+        break;
+    case MIG_STATE_COMPLETED:
+        get_xbzrle_cache_stats(info);
+
+        info->has_status = true;
+        info->status = g_strdup("completed");
+        info->has_total_time = true;
+        info->total_time = s->total_time;
+        info->has_downtime = true;
+        info->downtime = s->downtime;
+        info->has_setup_time = true;
+        info->setup_time = s->setup_time;
+
+        info->has_ram = true;
+        info->ram = g_malloc0(sizeof(*info->ram));
+        info->ram->transferred = ram_bytes_transferred();
+        info->ram->remaining = 0;
+        info->ram->total = ram_bytes_total();
+        info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->skipped = skipped_mig_pages_transferred();
+        info->ram->normal = norm_mig_pages_transferred();
+        info->ram->normal_bytes = norm_mig_bytes_transferred();
+        info->ram->mbps = s->mbps;
+        info->ram->dirty_sync_count = s->dirty_sync_count;
+        break;
+    case MIG_STATE_ERROR:
+        info->has_status = true;
+        info->status = g_strdup("failed");
+        break;
+    case MIG_STATE_CANCELLED:
+        info->has_status = true;
+        info->status = g_strdup("cancelled");
+        break;
+    }
+
+    return info;
+}
+
+void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
+                                  Error **errp)
+{
+    MigrationState *s = migrate_get_current();
+    MigrationCapabilityStatusList *cap;
+
+    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
+        error_set(errp, QERR_MIGRATION_ACTIVE);
+        return;
+    }
+
+    for (cap = params; cap; cap = cap->next) {
+        s->enabled_capabilities[cap->value->capability] = cap->value->state;
+    }
+}
+
+/* shared migration helpers */
+
+static void migrate_set_state(MigrationState *s, int old_state, int new_state)
+{
+    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
+        trace_migrate_set_state(new_state);
+    }
+}
+
+static void migrate_fd_cleanup(void *opaque)
+{
+    MigrationState *s = opaque;
+
+    qemu_bh_delete(s->cleanup_bh);
+    s->cleanup_bh = NULL;
+
+    if (s->file) {
+        trace_migrate_fd_cleanup();
+        qemu_mutex_unlock_iothread();
+        qemu_thread_join(&s->thread);
+        qemu_mutex_lock_iothread();
+
+        qemu_fclose(s->file);
+        s->file = NULL;
+    }
+
+    assert(s->state != MIG_STATE_ACTIVE);
+
+    if (s->state != MIG_STATE_COMPLETED) {
+        qemu_savevm_state_cancel();
+        if (s->state == MIG_STATE_CANCELLING) {
+            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
+        }
+    }
+
+    notifier_list_notify(&migration_state_notifiers, s);
+}
+
+void migrate_fd_error(MigrationState *s)
+{
+    trace_migrate_fd_error();
+    assert(s->file == NULL);
+    s->state = MIG_STATE_ERROR;
+    trace_migrate_set_state(MIG_STATE_ERROR);
+    notifier_list_notify(&migration_state_notifiers, s);
+}
+
+static void migrate_fd_cancel(MigrationState *s)
+{
+    int old_state;
+    trace_migrate_fd_cancel();
+
+    do {
+        old_state = s->state;
+        if (old_state != MIG_STATE_SETUP && old_state != MIG_STATE_ACTIVE) {
+            break;
+        }
+        migrate_set_state(s, old_state, MIG_STATE_CANCELLING);
+    } while (s->state != MIG_STATE_CANCELLING);
+}
+
+void add_migration_state_change_notifier(Notifier *notify)
+{
+    notifier_list_add(&migration_state_notifiers, notify);
+}
+
+void remove_migration_state_change_notifier(Notifier *notify)
+{
+    notifier_remove(notify);
+}
+
+bool migration_in_setup(MigrationState *s)
+{
+    return s->state == MIG_STATE_SETUP;
+}
+
+bool migration_has_finished(MigrationState *s)
+{
+    return s->state == MIG_STATE_COMPLETED;
+}
+
+bool migration_has_failed(MigrationState *s)
+{
+    return (s->state == MIG_STATE_CANCELLED ||
+            s->state == MIG_STATE_ERROR);
+}
+
+static MigrationState *migrate_init(const MigrationParams *params)
+{
+    MigrationState *s = migrate_get_current();
+    int64_t bandwidth_limit = s->bandwidth_limit;
+    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
+    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
+
+    memcpy(enabled_capabilities, s->enabled_capabilities,
+           sizeof(enabled_capabilities));
+
+    memset(s, 0, sizeof(*s));
+    s->params = *params;
+    memcpy(s->enabled_capabilities, enabled_capabilities,
+           sizeof(enabled_capabilities));
+    s->xbzrle_cache_size = xbzrle_cache_size;
+
+    s->bandwidth_limit = bandwidth_limit;
+    s->state = MIG_STATE_SETUP;
+    trace_migrate_set_state(MIG_STATE_SETUP);
+
+    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    return s;
+}
+
+static GSList *migration_blockers;
+
+void migrate_add_blocker(Error *reason)
+{
+    migration_blockers = g_slist_prepend(migration_blockers, reason);
+}
+
+void migrate_del_blocker(Error *reason)
+{
+    migration_blockers = g_slist_remove(migration_blockers, reason);
+}
+
+void qmp_migrate(const char *uri, bool has_blk, bool blk,
+                 bool has_inc, bool inc, bool has_detach, bool detach,
+                 Error **errp)
+{
+    Error *local_err = NULL;
+    MigrationState *s = migrate_get_current();
+    MigrationParams params;
+    const char *p;
+
+    params.blk = has_blk && blk;
+    params.shared = has_inc && inc;
+
+    if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP ||
+        s->state == MIG_STATE_CANCELLING) {
+        error_set(errp, QERR_MIGRATION_ACTIVE);
+        return;
+    }
+
+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        error_setg(errp, "Guest is waiting for an incoming migration");
+        return;
+    }
+
+    if (qemu_savevm_state_blocked(errp)) {
+        return;
+    }
+
+    if (migration_blockers) {
+        *errp = error_copy(migration_blockers->data);
+        return;
+    }
+
+    s = migrate_init(&params);
+
+    if (strstart(uri, "tcp:", &p)) {
+        tcp_start_outgoing_migration(s, p, &local_err);
+#ifdef CONFIG_RDMA
+    } else if (strstart(uri, "rdma:", &p)) {
+        rdma_start_outgoing_migration(s, p, &local_err);
+#endif
+#if !defined(WIN32)
+    } else if (strstart(uri, "exec:", &p)) {
+        exec_start_outgoing_migration(s, p, &local_err);
+    } else if (strstart(uri, "unix:", &p)) {
+        unix_start_outgoing_migration(s, p, &local_err);
+    } else if (strstart(uri, "fd:", &p)) {
+        fd_start_outgoing_migration(s, p, &local_err);
+#endif
+    } else {
+        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol");
+        s->state = MIG_STATE_ERROR;
+        return;
+    }
+
+    if (local_err) {
+        migrate_fd_error(s);
+        error_propagate(errp, local_err);
+        return;
+    }
+}
+
+void qmp_migrate_cancel(Error **errp)
+{
+    migrate_fd_cancel(migrate_get_current());
+}
+
+void qmp_migrate_set_cache_size(int64_t value, Error **errp)
+{
+    MigrationState *s = migrate_get_current();
+    int64_t new_size;
+
+    /* Check for truncation */
+    if (value != (size_t)value) {
+        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                  "exceeding address space");
+        return;
+    }
+
+    /* Cache should not be larger than guest ram size */
+    if (value > ram_bytes_total()) {
+        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                  "exceeds guest ram size ");
+        return;
+    }
+
+    new_size = xbzrle_cache_resize(value);
+    if (new_size < 0) {
+        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                  "is smaller than page size");
+        return;
+    }
+
+    s->xbzrle_cache_size = new_size;
+}
+
+int64_t qmp_query_migrate_cache_size(Error **errp)
+{
+    return migrate_xbzrle_cache_size();
+}
+
+void qmp_migrate_set_speed(int64_t value, Error **errp)
+{
+    MigrationState *s;
+
+    if (value < 0) {
+        value = 0;
+    }
+    if (value > SIZE_MAX) {
+        value = SIZE_MAX;
+    }
+
+    s = migrate_get_current();
+    s->bandwidth_limit = value;
+    if (s->file) {
+        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
+    }
+}
+
+void qmp_migrate_set_downtime(double value, Error **errp)
+{
+    value *= 1e9;
+    value = MAX(0, MIN(UINT64_MAX, value));
+    max_downtime = (uint64_t)value;
+}
+
+bool migrate_rdma_pin_all(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
+}
+
+bool migrate_auto_converge(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
+}
+
+bool migrate_zero_blocks(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
+}
+
+int migrate_use_xbzrle(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
+}
+
+int64_t migrate_xbzrle_cache_size(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->xbzrle_cache_size;
+}
+
+/* migration thread support */
+
+static void *migration_thread(void *opaque)
+{
+    MigrationState *s = opaque;
+    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
+    int64_t initial_bytes = 0;
+    int64_t max_size = 0;
+    int64_t start_time = initial_time;
+    bool old_vm_running = false;
+
+    qemu_savevm_state_begin(s->file, &s->params);
+
+    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
+    migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);
+
+    while (s->state == MIG_STATE_ACTIVE) {
+        int64_t current_time;
+        uint64_t pending_size;
+
+        if (!qemu_file_rate_limit(s->file)) {
+            pending_size = qemu_savevm_state_pending(s->file, max_size);
+            trace_migrate_pending(pending_size, max_size);
+            if (pending_size && pending_size >= max_size) {
+                qemu_savevm_state_iterate(s->file);
+            } else {
+                int ret;
+
+                qemu_mutex_lock_iothread();
+                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
+                old_vm_running = runstate_is_running();
+
+                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+                if (ret >= 0) {
+                    qemu_file_set_rate_limit(s->file, INT64_MAX);
+                    qemu_savevm_state_complete(s->file);
+                }
+                qemu_mutex_unlock_iothread();
+
+                if (ret < 0) {
+                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
+                    break;
+                }
+
+                if (!qemu_file_get_error(s->file)) {
+                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
+                    break;
+                }
+            }
+        }
+
+        if (qemu_file_get_error(s->file)) {
+            migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
+            break;
+        }
+        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+        if (current_time >= initial_time + BUFFER_DELAY) {
+            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
+            uint64_t time_spent = current_time - initial_time;
+            double bandwidth = transferred_bytes / time_spent;
+            max_size = bandwidth * migrate_max_downtime() / 1000000;
+
+            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
+                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;
+
+            trace_migrate_transferred(transferred_bytes, time_spent,
+                                      bandwidth, max_size);
+            /* If we haven't sent anything, we don't want to recalculate;
+               10000 is a small enough number for our purposes. */
+            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
+                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
+            }
+
+            qemu_file_reset_rate_limit(s->file);
+            initial_time = current_time;
+            initial_bytes = qemu_ftell(s->file);
+        }
+        if (qemu_file_rate_limit(s->file)) {
+            /* usleep expects microseconds */
+            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
+        }
+    }
+
+    qemu_mutex_lock_iothread();
+    if (s->state == MIG_STATE_COMPLETED) {
+        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+        uint64_t transferred_bytes = qemu_ftell(s->file);
+        s->total_time = end_time - s->total_time;
+        s->downtime = end_time - start_time;
+        if (s->total_time) {
+            s->mbps = (((double) transferred_bytes * 8.0) /
+                       ((double) s->total_time)) / 1000;
+        }
+        runstate_set(RUN_STATE_POSTMIGRATE);
+    } else {
+        if (old_vm_running) {
+            vm_start();
+        }
+    }
+    qemu_bh_schedule(s->cleanup_bh);
+    qemu_mutex_unlock_iothread();
+
+    return NULL;
+}
+
+void migrate_fd_connect(MigrationState *s)
+{
+    s->state = MIG_STATE_SETUP;
+    trace_migrate_set_state(MIG_STATE_SETUP);
+
+    /* This is a best first approximation; convert ns to ms. */
+    s->expected_downtime = max_downtime/1000000;
+    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
+
+    qemu_file_set_rate_limit(s->file,
+                             s->bandwidth_limit / XFER_LIMIT_RATIO);
+
+    /* Notify before starting migration thread */
+    notifier_list_notify(&migration_state_notifiers, s);
+
+    qemu_thread_create(&s->thread, "migration", migration_thread, s,
+                       QEMU_THREAD_JOINABLE);
+}
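
The convergence arithmetic in migration_thread() is easier to follow
with concrete numbers: bandwidth is measured in bytes per millisecond
(bytes moved over a BUFFER_DELAY window of ~100 ms), and max_downtime
is in nanoseconds, so dividing by 1e6 converts it to milliseconds
before the multiply. A self-contained sketch with made-up figures:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t transferred_bytes = 12500000;  /* 12.5 MB in one...    */
        uint64_t time_spent_ms     = 100;       /* ...BUFFER_DELAY tick */
        uint64_t max_downtime_ns   = 300000000; /* the 300 ms default   */

        double bandwidth = (double)transferred_bytes / time_spent_ms;

        /* bytes that may remain pending and still fit in the downtime */
        double max_size = bandwidth * max_downtime_ns / 1000000.0;

        /* prints: bandwidth 125000 bytes/ms -> max_size 37500000 bytes */
        printf("bandwidth %.0f bytes/ms -> max_size %.0f bytes\n",
               bandwidth, max_size);
        return 0;
    }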
diff --git a/migration/qemu-file-stdio.c b/migration/qemu-file-stdio.c
new file mode 100644 (file)
index 0000000..285068b
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu-common.h"
+#include "block/coroutine.h"
+#include "migration/qemu-file.h"
+
+typedef struct QEMUFileStdio {
+    FILE *stdio_file;
+    QEMUFile *file;
+} QEMUFileStdio;
+
+static int stdio_get_fd(void *opaque)
+{
+    QEMUFileStdio *s = opaque;
+
+    return fileno(s->stdio_file);
+}
+
+static int stdio_put_buffer(void *opaque, const uint8_t *buf, int64_t pos,
+                            int size)
+{
+    QEMUFileStdio *s = opaque;
+    int res;
+
+    res = fwrite(buf, 1, size, s->stdio_file);
+
+    if (res != size) {
+        return -errno;
+    }
+    return res;
+}
+
+static int stdio_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
+{
+    QEMUFileStdio *s = opaque;
+    FILE *fp = s->stdio_file;
+    int bytes;
+
+    for (;;) {
+        clearerr(fp);
+        bytes = fread(buf, 1, size, fp);
+        if (bytes != 0 || !ferror(fp)) {
+            break;
+        }
+        if (errno == EAGAIN) {
+            yield_until_fd_readable(fileno(fp));
+        } else if (errno != EINTR) {
+            break;
+        }
+    }
+    return bytes;
+}
+
+static int stdio_pclose(void *opaque)
+{
+    QEMUFileStdio *s = opaque;
+    int ret;
+    ret = pclose(s->stdio_file);
+    if (ret == -1) {
+        ret = -errno;
+    } else if (!WIFEXITED(ret) || WEXITSTATUS(ret) != 0) {
+        /* close succeeded, but non-zero exit code: */
+        ret = -EIO; /* fake errno value */
+    }
+    g_free(s);
+    return ret;
+}
+
+static int stdio_fclose(void *opaque)
+{
+    QEMUFileStdio *s = opaque;
+    int ret = 0;
+
+    if (qemu_file_is_writable(s->file)) {
+        int fd = fileno(s->stdio_file);
+        struct stat st;
+
+        ret = fstat(fd, &st);
+        if (ret == 0 && S_ISREG(st.st_mode)) {
+            /*
+             * If the file handle is a regular file make sure the
+             * data is flushed to disk before signaling success.
+             */
+            ret = fsync(fd);
+            if (ret != 0) {
+                ret = -errno;
+                return ret;
+            }
+        }
+    }
+    if (fclose(s->stdio_file) == EOF) {
+        ret = -errno;
+    }
+    g_free(s);
+    return ret;
+}
+
+static const QEMUFileOps stdio_pipe_read_ops = {
+    .get_fd =     stdio_get_fd,
+    .get_buffer = stdio_get_buffer,
+    .close =      stdio_pclose
+};
+
+static const QEMUFileOps stdio_pipe_write_ops = {
+    .get_fd =     stdio_get_fd,
+    .put_buffer = stdio_put_buffer,
+    .close =      stdio_pclose
+};
+
+QEMUFile *qemu_popen_cmd(const char *command, const char *mode)
+{
+    FILE *stdio_file;
+    QEMUFileStdio *s;
+
+    if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') || mode[1] != 0) {
+        fprintf(stderr, "qemu_popen: Argument validity check failed\n");
+        return NULL;
+    }
+
+    stdio_file = popen(command, mode);
+    if (stdio_file == NULL) {
+        return NULL;
+    }
+
+    s = g_malloc0(sizeof(QEMUFileStdio));
+
+    s->stdio_file = stdio_file;
+
+    if (mode[0] == 'r') {
+        s->file = qemu_fopen_ops(s, &stdio_pipe_read_ops);
+    } else {
+        s->file = qemu_fopen_ops(s, &stdio_pipe_write_ops);
+    }
+    return s->file;
+}
+
+static const QEMUFileOps stdio_file_read_ops = {
+    .get_fd =     stdio_get_fd,
+    .get_buffer = stdio_get_buffer,
+    .close =      stdio_fclose
+};
+
+static const QEMUFileOps stdio_file_write_ops = {
+    .get_fd =     stdio_get_fd,
+    .put_buffer = stdio_put_buffer,
+    .close =      stdio_fclose
+};
+
+QEMUFile *qemu_fopen(const char *filename, const char *mode)
+{
+    QEMUFileStdio *s;
+
+    if (qemu_file_mode_is_not_valid(mode)) {
+        return NULL;
+    }
+
+    s = g_malloc0(sizeof(QEMUFileStdio));
+
+    s->stdio_file = fopen(filename, mode);
+    if (!s->stdio_file) {
+        goto fail;
+    }
+
+    if (mode[0] == 'w') {
+        s->file = qemu_fopen_ops(s, &stdio_file_write_ops);
+    } else {
+        s->file = qemu_fopen_ops(s, &stdio_file_read_ops);
+    }
+    return s->file;
+fail:
+    g_free(s);
+    return NULL;
+}
diff --git a/migration/qemu-file-unix.c b/migration/qemu-file-unix.c
new file mode 100644 (file)
index 0000000..9682396
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu-common.h"
+#include "qemu/iov.h"
+#include "qemu/sockets.h"
+#include "block/coroutine.h"
+#include "migration/qemu-file.h"
+
+typedef struct QEMUFileSocket {
+    int fd;
+    QEMUFile *file;
+} QEMUFileSocket;
+
+static ssize_t socket_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
+                                    int64_t pos)
+{
+    QEMUFileSocket *s = opaque;
+    ssize_t len;
+    ssize_t size = iov_size(iov, iovcnt);
+
+    len = iov_send(s->fd, iov, iovcnt, 0, size);
+    if (len < size) {
+        len = -socket_error();
+    }
+    return len;
+}
+
+static int socket_get_fd(void *opaque)
+{
+    QEMUFileSocket *s = opaque;
+
+    return s->fd;
+}
+
+static int socket_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
+{
+    QEMUFileSocket *s = opaque;
+    ssize_t len;
+
+    for (;;) {
+        len = qemu_recv(s->fd, buf, size, 0);
+        if (len != -1) {
+            break;
+        }
+        if (socket_error() == EAGAIN) {
+            yield_until_fd_readable(s->fd);
+        } else if (socket_error() != EINTR) {
+            break;
+        }
+    }
+
+    if (len == -1) {
+        len = -socket_error();
+    }
+    return len;
+}
+
+static int socket_close(void *opaque)
+{
+    QEMUFileSocket *s = opaque;
+    closesocket(s->fd);
+    g_free(s);
+    return 0;
+}
+
+static ssize_t unix_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
+                                  int64_t pos)
+{
+    QEMUFileSocket *s = opaque;
+    ssize_t len, offset;
+    ssize_t size = iov_size(iov, iovcnt);
+    ssize_t total = 0;
+
+    assert(iovcnt > 0);
+    offset = 0;
+    while (size > 0) {
+        /* Find the next start position; skip all full-sized vector elements  */
+        while (offset >= iov[0].iov_len) {
+            offset -= iov[0].iov_len;
+            iov++, iovcnt--;
+        }
+
+        /* skip `offset' bytes from the (now) first element, undo it on exit */
+        assert(iovcnt > 0);
+        iov[0].iov_base += offset;
+        iov[0].iov_len -= offset;
+
+        do {
+            len = writev(s->fd, iov, iovcnt);
+        } while (len == -1 && errno == EINTR);
+        if (len == -1) {
+            return -errno;
+        }
+
+        /* Undo the changes above */
+        iov[0].iov_base -= offset;
+        iov[0].iov_len += offset;
+
+        /* Prepare for the next iteration */
+        offset += len;
+        total += len;
+        size -= len;
+    }
+
+    return total;
+}
+
+static int unix_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
+{
+    QEMUFileSocket *s = opaque;
+    ssize_t len;
+
+    for (;;) {
+        len = read(s->fd, buf, size);
+        if (len != -1) {
+            break;
+        }
+        if (errno == EAGAIN) {
+            yield_until_fd_readable(s->fd);
+        } else if (errno != EINTR) {
+            break;
+        }
+    }
+
+    if (len == -1) {
+        len = -errno;
+    }
+    return len;
+}
+
+static int unix_close(void *opaque)
+{
+    QEMUFileSocket *s = opaque;
+    close(s->fd);
+    g_free(s);
+    return 0;
+}
+
+static const QEMUFileOps unix_read_ops = {
+    .get_fd =     socket_get_fd,
+    .get_buffer = unix_get_buffer,
+    .close =      unix_close
+};
+
+static const QEMUFileOps unix_write_ops = {
+    .get_fd =     socket_get_fd,
+    .writev_buffer = unix_writev_buffer,
+    .close =      unix_close
+};
+
+QEMUFile *qemu_fdopen(int fd, const char *mode)
+{
+    QEMUFileSocket *s;
+
+    if (mode == NULL ||
+        (mode[0] != 'r' && mode[0] != 'w') ||
+        mode[1] != 'b' || mode[2] != 0) {
+        fprintf(stderr, "qemu_fdopen: Argument validity check failed\n");
+        return NULL;
+    }
+
+    s = g_malloc0(sizeof(QEMUFileSocket));
+    s->fd = fd;
+
+    if (mode[0] == 'r') {
+        s->file = qemu_fopen_ops(s, &unix_read_ops);
+    } else {
+        s->file = qemu_fopen_ops(s, &unix_write_ops);
+    }
+    return s->file;
+}
+
+static const QEMUFileOps socket_read_ops = {
+    .get_fd =     socket_get_fd,
+    .get_buffer = socket_get_buffer,
+    .close =      socket_close
+};
+
+static const QEMUFileOps socket_write_ops = {
+    .get_fd =     socket_get_fd,
+    .writev_buffer = socket_writev_buffer,
+    .close =      socket_close
+};
+
+QEMUFile *qemu_fopen_socket(int fd, const char *mode)
+{
+    QEMUFileSocket *s;
+
+    if (qemu_file_mode_is_not_valid(mode)) {
+        return NULL;
+    }
+
+    s = g_malloc0(sizeof(QEMUFileSocket));
+    s->fd = fd;
+    if (mode[0] == 'w') {
+        qemu_set_block(s->fd);
+        s->file = qemu_fopen_ops(s, &socket_write_ops);
+    } else {
+        s->file = qemu_fopen_ops(s, &socket_read_ops);
+    }
+    return s->file;
+}
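
For the raw-fd flavour the convention is the same; here is a minimal sketch of
qemu_fdopen() wrapping both ends of a local pipe (a hypothetical stand-in for a
real migration fd, not code from this commit):

#include <assert.h>
#include <unistd.h>
#include "migration/qemu-file.h"

/* Hypothetical example: round-trip a value through a pipe. */
static void example_fd_roundtrip(void)
{
    int fds[2];

    if (pipe(fds) != 0) {
        return;
    }
    QEMUFile *wr = qemu_fdopen(fds[1], "wb");   /* unix_write_ops */
    QEMUFile *rd = qemu_fdopen(fds[0], "rb");   /* unix_read_ops */

    qemu_put_be16(wr, 0xbeef);
    qemu_fflush(wr);                    /* push the queued iovec out */
    assert(qemu_get_be16(rd) == 0xbeef);

    qemu_fclose(wr);
    qemu_fclose(rd);
}
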
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
new file mode 100644 (file)
index 0000000..f938e36
--- /dev/null
@@ -0,0 +1,995 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu-common.h"
+#include "qemu/iov.h"
+#include "qemu/sockets.h"
+#include "block/coroutine.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "trace.h"
+
+#define IO_BUF_SIZE 32768
+#define MAX_IOV_SIZE MIN(IOV_MAX, 64)
+
+struct QEMUFile {
+    const QEMUFileOps *ops;
+    void *opaque;
+
+    int64_t bytes_xfer;
+    int64_t xfer_limit;
+
+    int64_t pos; /* start of buffer when writing, end of buffer
+                    when reading */
+    int buf_index;
+    int buf_size; /* 0 when writing */
+    uint8_t buf[IO_BUF_SIZE];
+
+    struct iovec iov[MAX_IOV_SIZE];
+    unsigned int iovcnt;
+
+    int last_error;
+};
+
+bool qemu_file_mode_is_not_valid(const char *mode)
+{
+    if (mode == NULL ||
+        (mode[0] != 'r' && mode[0] != 'w') ||
+        mode[1] != 'b' || mode[2] != 0) {
+        fprintf(stderr, "qemu_fopen: Argument validity check failed\n");
+        return true;
+    }
+
+    return false;
+}
+
+QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops)
+{
+    QEMUFile *f;
+
+    f = g_malloc0(sizeof(QEMUFile));
+
+    f->opaque = opaque;
+    f->ops = ops;
+    return f;
+}
+
+/*
+ * Get last error for stream f
+ *
+ * Returns a negative error value if there has been an error on a previous
+ * operation, or 0 if no error has happened.
+ *
+ */
+int qemu_file_get_error(QEMUFile *f)
+{
+    return f->last_error;
+}
+
+void qemu_file_set_error(QEMUFile *f, int ret)
+{
+    if (f->last_error == 0) {
+        f->last_error = ret;
+    }
+}
+
+bool qemu_file_is_writable(QEMUFile *f)
+{
+    return f->ops->writev_buffer || f->ops->put_buffer;
+}
+
+/**
+ * Flushes QEMUFile buffer
+ *
+ * If the writev_buffer op is defined it is used; otherwise the
+ * put_buffer op is used.
+ */
+void qemu_fflush(QEMUFile *f)
+{
+    ssize_t ret = 0;
+
+    if (!qemu_file_is_writable(f)) {
+        return;
+    }
+
+    if (f->ops->writev_buffer) {
+        if (f->iovcnt > 0) {
+            ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos);
+        }
+    } else {
+        if (f->buf_index > 0) {
+            ret = f->ops->put_buffer(f->opaque, f->buf, f->pos, f->buf_index);
+        }
+    }
+    if (ret >= 0) {
+        f->pos += ret;
+    }
+    f->buf_index = 0;
+    f->iovcnt = 0;
+    if (ret < 0) {
+        qemu_file_set_error(f, ret);
+    }
+}
+
+void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
+{
+    int ret = 0;
+
+    if (f->ops->before_ram_iterate) {
+        ret = f->ops->before_ram_iterate(f, f->opaque, flags);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+        }
+    }
+}
+
+void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
+{
+    int ret = 0;
+
+    if (f->ops->after_ram_iterate) {
+        ret = f->ops->after_ram_iterate(f, f->opaque, flags);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+        }
+    }
+}
+
+void ram_control_load_hook(QEMUFile *f, uint64_t flags)
+{
+    int ret = -EINVAL;
+
+    if (f->ops->hook_ram_load) {
+        ret = f->ops->hook_ram_load(f, f->opaque, flags);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+        }
+    } else {
+        qemu_file_set_error(f, ret);
+    }
+}
+
+size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
+                         ram_addr_t offset, size_t size, int *bytes_sent)
+{
+    if (f->ops->save_page) {
+        int ret = f->ops->save_page(f, f->opaque, block_offset,
+                                    offset, size, bytes_sent);
+
+        if (ret != RAM_SAVE_CONTROL_DELAYED) {
+            if (bytes_sent && *bytes_sent > 0) {
+                qemu_update_position(f, *bytes_sent);
+            } else if (ret < 0) {
+                qemu_file_set_error(f, ret);
+            }
+        }
+
+        return ret;
+    }
+
+    return RAM_SAVE_CONTROL_NOT_SUPP;
+}
+
+/*
+ * Attempt to fill the buffer from the underlying file
+ * Returns the number of bytes read, or negative value for an error.
+ *
+ * Note that it can return a partially full buffer even in a non-error/non-EOF
+ * case if the underlying file descriptor gives a short read, and that can
+ * happen even on a blocking fd.
+ */
+static ssize_t qemu_fill_buffer(QEMUFile *f)
+{
+    int len;
+    int pending;
+
+    assert(!qemu_file_is_writable(f));
+
+    pending = f->buf_size - f->buf_index;
+    if (pending > 0) {
+        memmove(f->buf, f->buf + f->buf_index, pending);
+    }
+    f->buf_index = 0;
+    f->buf_size = pending;
+
+    len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos,
+                        IO_BUF_SIZE - pending);
+    if (len > 0) {
+        f->buf_size += len;
+        f->pos += len;
+    } else if (len == 0) {
+        qemu_file_set_error(f, -EIO);
+    } else if (len != -EAGAIN) {
+        qemu_file_set_error(f, len);
+    }
+
+    return len;
+}
+
+int qemu_get_fd(QEMUFile *f)
+{
+    if (f->ops->get_fd) {
+        return f->ops->get_fd(f->opaque);
+    }
+    return -1;
+}
+
+void qemu_update_position(QEMUFile *f, size_t size)
+{
+    f->pos += size;
+}
+
+/** Closes the file
+ *
+ * Returns a negative error value if any error happened on previous operations
+ * or while closing the file. Returns 0 or a positive number on success.
+ *
+ * The meaning of return value on success depends on the specific backend
+ * being used.
+ */
+int qemu_fclose(QEMUFile *f)
+{
+    int ret;
+    qemu_fflush(f);
+    ret = qemu_file_get_error(f);
+
+    if (f->ops->close) {
+        int ret2 = f->ops->close(f->opaque);
+        if (ret >= 0) {
+            ret = ret2;
+        }
+    }
+    /* If any error was spotted before closing, we should report it
+     * instead of the close() return value.
+     */
+    if (f->last_error) {
+        ret = f->last_error;
+    }
+    g_free(f);
+    trace_qemu_file_fclose();
+    return ret;
+}
+
+static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
+{
+    /* if the buffer is adjacent to the previous one, coalesce them */
+    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
+        f->iov[f->iovcnt - 1].iov_len) {
+        f->iov[f->iovcnt - 1].iov_len += size;
+    } else {
+        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
+        f->iov[f->iovcnt++].iov_len = size;
+    }
+
+    if (f->iovcnt >= MAX_IOV_SIZE) {
+        qemu_fflush(f);
+    }
+}
+
+void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
+{
+    if (!f->ops->writev_buffer) {
+        qemu_put_buffer(f, buf, size);
+        return;
+    }
+
+    if (f->last_error) {
+        return;
+    }
+
+    f->bytes_xfer += size;
+    add_to_iovec(f, buf, size);
+}
+
+void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size)
+{
+    int l;
+
+    if (f->last_error) {
+        return;
+    }
+
+    while (size > 0) {
+        l = IO_BUF_SIZE - f->buf_index;
+        if (l > size) {
+            l = size;
+        }
+        memcpy(f->buf + f->buf_index, buf, l);
+        f->bytes_xfer += l;
+        if (f->ops->writev_buffer) {
+            add_to_iovec(f, f->buf + f->buf_index, l);
+        }
+        f->buf_index += l;
+        if (f->buf_index == IO_BUF_SIZE) {
+            qemu_fflush(f);
+        }
+        if (qemu_file_get_error(f)) {
+            break;
+        }
+        buf += l;
+        size -= l;
+    }
+}
+
+void qemu_put_byte(QEMUFile *f, int v)
+{
+    if (f->last_error) {
+        return;
+    }
+
+    f->buf[f->buf_index] = v;
+    f->bytes_xfer++;
+    if (f->ops->writev_buffer) {
+        add_to_iovec(f, f->buf + f->buf_index, 1);
+    }
+    f->buf_index++;
+    if (f->buf_index == IO_BUF_SIZE) {
+        qemu_fflush(f);
+    }
+}
+
+void qemu_file_skip(QEMUFile *f, int size)
+{
+    if (f->buf_index + size <= f->buf_size) {
+        f->buf_index += size;
+    }
+}
+
+/*
+ * Read 'size' bytes from file (at 'offset') into buf without moving the
+ * pointer.
+ *
+ * It will return size bytes unless there was an error, in which case it will
+ * return as many as it managed to read (assuming the blocking fds that
+ * all current QEMUFiles use)
+ */
+int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset)
+{
+    int pending;
+    int index;
+
+    assert(!qemu_file_is_writable(f));
+    assert(offset < IO_BUF_SIZE);
+    assert(size <= IO_BUF_SIZE - offset);
+
+    /* The 1st byte to read from */
+    index = f->buf_index + offset;
+    /* The number of available bytes starting at index */
+    pending = f->buf_size - index;
+
+    /*
+     * qemu_fill_buffer might return just a few bytes, even when there isn't
+     * an error, so loop collecting them until we get enough.
+     */
+    while (pending < size) {
+        int received = qemu_fill_buffer(f);
+
+        if (received <= 0) {
+            break;
+        }
+
+        index = f->buf_index + offset;
+        pending = f->buf_size - index;
+    }
+
+    if (pending <= 0) {
+        return 0;
+    }
+    if (size > pending) {
+        size = pending;
+    }
+
+    memcpy(buf, f->buf + index, size);
+    return size;
+}
+
+/*
+ * Read 'size' bytes of data from the file into buf.
+ * 'size' can be larger than the internal buffer.
+ *
+ * It will return size bytes unless there was an error, in which case it will
+ * return as many as it managed to read (assuming the blocking fds that
+ * all current QEMUFiles use)
+ */
+int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
+{
+    int pending = size;
+    int done = 0;
+
+    while (pending > 0) {
+        int res;
+
+        res = qemu_peek_buffer(f, buf, MIN(pending, IO_BUF_SIZE), 0);
+        if (res == 0) {
+            return done;
+        }
+        qemu_file_skip(f, res);
+        buf += res;
+        pending -= res;
+        done += res;
+    }
+    return done;
+}
+
+/*
+ * Peeks a single byte from the buffer; this isn't guaranteed to work if
+ * offset leaves a gap after the previous read/peeked data.
+ */
+int qemu_peek_byte(QEMUFile *f, int offset)
+{
+    int index = f->buf_index + offset;
+
+    assert(!qemu_file_is_writable(f));
+    assert(offset < IO_BUF_SIZE);
+
+    if (index >= f->buf_size) {
+        qemu_fill_buffer(f);
+        index = f->buf_index + offset;
+        if (index >= f->buf_size) {
+            return 0;
+        }
+    }
+    return f->buf[index];
+}
+
+int qemu_get_byte(QEMUFile *f)
+{
+    int result;
+
+    result = qemu_peek_byte(f, 0);
+    qemu_file_skip(f, 1);
+    return result;
+}
+
+int64_t qemu_ftell(QEMUFile *f)
+{
+    qemu_fflush(f);
+    return f->pos;
+}
+
+int qemu_file_rate_limit(QEMUFile *f)
+{
+    if (qemu_file_get_error(f)) {
+        return 1;
+    }
+    if (f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit) {
+        return 1;
+    }
+    return 0;
+}
+
+int64_t qemu_file_get_rate_limit(QEMUFile *f)
+{
+    return f->xfer_limit;
+}
+
+void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit)
+{
+    f->xfer_limit = limit;
+}
+
+void qemu_file_reset_rate_limit(QEMUFile *f)
+{
+    f->bytes_xfer = 0;
+}
+
+void qemu_put_be16(QEMUFile *f, unsigned int v)
+{
+    qemu_put_byte(f, v >> 8);
+    qemu_put_byte(f, v);
+}
+
+void qemu_put_be32(QEMUFile *f, unsigned int v)
+{
+    qemu_put_byte(f, v >> 24);
+    qemu_put_byte(f, v >> 16);
+    qemu_put_byte(f, v >> 8);
+    qemu_put_byte(f, v);
+}
+
+void qemu_put_be64(QEMUFile *f, uint64_t v)
+{
+    qemu_put_be32(f, v >> 32);
+    qemu_put_be32(f, v);
+}
+
+unsigned int qemu_get_be16(QEMUFile *f)
+{
+    unsigned int v;
+    v = qemu_get_byte(f) << 8;
+    v |= qemu_get_byte(f);
+    return v;
+}
+
+unsigned int qemu_get_be32(QEMUFile *f)
+{
+    unsigned int v;
+    v = qemu_get_byte(f) << 24;
+    v |= qemu_get_byte(f) << 16;
+    v |= qemu_get_byte(f) << 8;
+    v |= qemu_get_byte(f);
+    return v;
+}
+
+uint64_t qemu_get_be64(QEMUFile *f)
+{
+    uint64_t v;
+    v = (uint64_t)qemu_get_be32(f) << 32;
+    v |= qemu_get_be32(f);
+    return v;
+}
+
+#define QSB_CHUNK_SIZE      (1 << 10)
+#define QSB_MAX_CHUNK_SIZE  (16 * QSB_CHUNK_SIZE)
+
+/**
+ * Create a QEMUSizedBuffer
+ * This type of buffer uses scatter-gather lists internally and
+ * can grow to any size. Each data array in the scatter-gather list
+ * can hold a different number of bytes.
+ *
+ * @buffer: Optional buffer to copy into the QSB
+ * @len: size of initial buffer; if @buffer is given, buffer must
+ *       hold at least len bytes
+ *
+ * Returns a pointer to a QEMUSizedBuffer or NULL on allocation failure
+ */
+QEMUSizedBuffer *qsb_create(const uint8_t *buffer, size_t len)
+{
+    QEMUSizedBuffer *qsb;
+    size_t alloc_len, num_chunks, i, to_copy;
+    size_t chunk_size = (len > QSB_MAX_CHUNK_SIZE)
+                        ? QSB_MAX_CHUNK_SIZE
+                        : QSB_CHUNK_SIZE;
+
+    num_chunks = DIV_ROUND_UP(len ? len : QSB_CHUNK_SIZE, chunk_size);
+    alloc_len = num_chunks * chunk_size;
+
+    qsb = g_try_new0(QEMUSizedBuffer, 1);
+    if (!qsb) {
+        return NULL;
+    }
+
+    qsb->iov = g_try_new0(struct iovec, num_chunks);
+    if (!qsb->iov) {
+        g_free(qsb);
+        return NULL;
+    }
+
+    qsb->n_iov = num_chunks;
+
+    for (i = 0; i < num_chunks; i++) {
+        qsb->iov[i].iov_base = g_try_malloc0(chunk_size);
+        if (!qsb->iov[i].iov_base) {
+            /* qsb_free is safe since g_free can cope with NULL */
+            qsb_free(qsb);
+            return NULL;
+        }
+
+        qsb->iov[i].iov_len = chunk_size;
+        if (buffer) {
+            to_copy = (len - qsb->used) > chunk_size
+                      ? chunk_size : (len - qsb->used);
+            memcpy(qsb->iov[i].iov_base, &buffer[qsb->used], to_copy);
+            qsb->used += to_copy;
+        }
+    }
+
+    qsb->size = alloc_len;
+
+    return qsb;
+}
+
+/**
+ * Free the QEMUSizedBuffer
+ *
+ * @qsb: The QEMUSizedBuffer to free
+ */
+void qsb_free(QEMUSizedBuffer *qsb)
+{
+    size_t i;
+
+    if (!qsb) {
+        return;
+    }
+
+    for (i = 0; i < qsb->n_iov; i++) {
+        g_free(qsb->iov[i].iov_base);
+    }
+    g_free(qsb->iov);
+    g_free(qsb);
+}
+
+/**
+ * Get the number of used bytes in the QEMUSizedBuffer
+ *
+ * @qsb: A QEMUSizedBuffer
+ *
+ * Returns the number of bytes currently used in this buffer
+ */
+size_t qsb_get_length(const QEMUSizedBuffer *qsb)
+{
+    return qsb->used;
+}
+
+/**
+ * Set the length of the buffer; the primary usage of this
+ * function is to truncate the number of used bytes in the buffer.
+ * The size will not be extended beyond the current number of
+ * allocated bytes in the QEMUSizedBuffer.
+ *
+ * @qsb: A QEMUSizedBuffer
+ * @new_len: The new length of bytes in the buffer
+ *
+ * Returns the number of bytes the buffer was truncated or extended
+ * to.
+ */
+size_t qsb_set_length(QEMUSizedBuffer *qsb, size_t new_len)
+{
+    if (new_len <= qsb->size) {
+        qsb->used = new_len;
+    } else {
+        qsb->used = qsb->size;
+    }
+    return qsb->used;
+}
+
+/**
+ * Get the iovec that holds the data for a given position @pos.
+ *
+ * @qsb: A QEMUSizedBuffer
+ * @pos: The index of a byte in the buffer
+ * @d_off: Pointer to an offset that this function will indicate
+ *         at what position within the returned iovec the byte
+ *         is to be found
+ *
+ * Returns the index of the iovec that holds the byte at the given
+ * index @pos in the byte stream, or a negative number if the iovec
+ * for the given position @pos does not exist.
+ */
+static ssize_t qsb_get_iovec(const QEMUSizedBuffer *qsb,
+                             off_t pos, off_t *d_off)
+{
+    ssize_t i;
+    off_t curr = 0;
+
+    if (pos > qsb->used) {
+        return -1;
+    }
+
+    for (i = 0; i < qsb->n_iov; i++) {
+        if (curr + qsb->iov[i].iov_len > pos) {
+            *d_off = pos - curr;
+            return i;
+        }
+        curr += qsb->iov[i].iov_len;
+    }
+    return -1;
+}
+
+/*
+ * Convert the QEMUSizedBuffer into a flat buffer.
+ *
+ * Note: If at all possible, try to avoid this function since it
+ *       may unnecessarily copy memory around.
+ *
+ * @qsb: pointer to QEMUSizedBuffer
+ * @start: offset to start at
+ * @count: number of bytes to copy
+ * @buffer: a pointer to a buffer to write into (at least @count bytes)
+ *
+ * Returns the number of bytes copied into the output buffer
+ */
+ssize_t qsb_get_buffer(const QEMUSizedBuffer *qsb, off_t start,
+                       size_t count, uint8_t *buffer)
+{
+    const struct iovec *iov;
+    size_t to_copy, all_copy;
+    ssize_t index;
+    off_t s_off;
+    off_t d_off = 0;
+    char *s;
+
+    if (start > qsb->used) {
+        return 0;
+    }
+
+    all_copy = qsb->used - start;
+    if (all_copy > count) {
+        all_copy = count;
+    } else {
+        count = all_copy;
+    }
+
+    index = qsb_get_iovec(qsb, start, &s_off);
+    if (index < 0) {
+        return 0;
+    }
+
+    while (all_copy > 0) {
+        iov = &qsb->iov[index];
+
+        s = iov->iov_base;
+
+        to_copy = iov->iov_len - s_off;
+        if (to_copy > all_copy) {
+            to_copy = all_copy;
+        }
+        memcpy(&buffer[d_off], &s[s_off], to_copy);
+
+        d_off += to_copy;
+        all_copy -= to_copy;
+
+        s_off = 0;
+        index++;
+    }
+
+    return count;
+}
+
+/**
+ * Grow the QEMUSizedBuffer to the given size and allocate
+ * memory for it.
+ *
+ * @qsb: A QEMUSizedBuffer
+ * @new_size: The new size of the buffer
+ *
+ * Return:
+ *    a negative error code in case of memory allocation failure
+ * or
+ *    the new size of the buffer. The returned size may be greater than or
+ *    equal to @new_size.
+ */
+static ssize_t qsb_grow(QEMUSizedBuffer *qsb, size_t new_size)
+{
+    size_t needed_chunks, i;
+
+    if (qsb->size < new_size) {
+        struct iovec *new_iov;
+        size_t size_diff = new_size - qsb->size;
+        size_t chunk_size = (size_diff > QSB_MAX_CHUNK_SIZE)
+                             ? QSB_MAX_CHUNK_SIZE : QSB_CHUNK_SIZE;
+
+        needed_chunks = DIV_ROUND_UP(size_diff, chunk_size);
+
+        new_iov = g_try_new(struct iovec, qsb->n_iov + needed_chunks);
+        if (new_iov == NULL) {
+            return -ENOMEM;
+        }
+
+        /* Allocate new chunks as needed into new_iov */
+        for (i = qsb->n_iov; i < qsb->n_iov + needed_chunks; i++) {
+            new_iov[i].iov_base = g_try_malloc0(chunk_size);
+            new_iov[i].iov_len = chunk_size;
+            if (!new_iov[i].iov_base) {
+                size_t j;
+
+                /* Free previously allocated new chunks */
+                for (j = qsb->n_iov; j < i; j++) {
+                    g_free(new_iov[j].iov_base);
+                }
+                g_free(new_iov);
+
+                return -ENOMEM;
+            }
+        }
+
+        /*
+         * Now that no allocation errors can occur, copy over to the new
+         * iov and switch.
+         */
+        for (i = 0; i < qsb->n_iov; i++) {
+            new_iov[i] = qsb->iov[i];
+        }
+
+        qsb->n_iov += needed_chunks;
+        g_free(qsb->iov);
+        qsb->iov = new_iov;
+        qsb->size += (needed_chunks * chunk_size);
+    }
+
+    return qsb->size;
+}
+
+/**
+ * Write into the QEMUSizedBuffer at a given position and a given
+ * number of bytes. This function will automatically grow the
+ * QEMUSizedBuffer.
+ *
+ * @qsb: A QEMUSizedBuffer
+ * @source: A byte array to copy data from
+ * @pos: The position within the @qsb to write data to
+ * @count: The number of bytes to copy into the @qsb
+ *
+ * Returns @count, or a negative error code in case of memory allocation
+ * failure or an invalid @pos.
+ */
+ssize_t qsb_write_at(QEMUSizedBuffer *qsb, const uint8_t *source,
+                     off_t pos, size_t count)
+{
+    ssize_t rc = qsb_grow(qsb, pos + count);
+    size_t to_copy;
+    size_t all_copy = count;
+    const struct iovec *iov;
+    ssize_t index;
+    char *dest;
+    off_t d_off, s_off = 0;
+
+    if (rc < 0) {
+        return rc;
+    }
+
+    if (pos + count > qsb->used) {
+        qsb->used = pos + count;
+    }
+
+    index = qsb_get_iovec(qsb, pos, &d_off);
+    if (index < 0) {
+        return -EINVAL;
+    }
+
+    while (all_copy > 0) {
+        iov = &qsb->iov[index];
+
+        dest = iov->iov_base;
+
+        to_copy = iov->iov_len - d_off;
+        if (to_copy > all_copy) {
+            to_copy = all_copy;
+        }
+
+        memcpy(&dest[d_off], &source[s_off], to_copy);
+
+        s_off += to_copy;
+        all_copy -= to_copy;
+
+        d_off = 0;
+        index++;
+    }
+
+    return count;
+}
+
+/**
+ * Create a deep copy of the given QEMUSizedBuffer.
+ *
+ * @qsb: A QEMUSizedBuffer
+ *
+ * Returns a clone of @qsb or NULL on allocation failure
+ */
+QEMUSizedBuffer *qsb_clone(const QEMUSizedBuffer *qsb)
+{
+    QEMUSizedBuffer *out = qsb_create(NULL, qsb_get_length(qsb));
+    size_t i;
+    ssize_t res;
+    off_t pos = 0;
+
+    if (!out) {
+        return NULL;
+    }
+
+    for (i = 0; i < qsb->n_iov; i++) {
+        res = qsb_write_at(out, qsb->iov[i].iov_base,
+                            pos, qsb->iov[i].iov_len);
+        if (res < 0) {
+            qsb_free(out);
+            return NULL;
+        }
+        pos += res;
+    }
+
+    return out;
+}
+
+typedef struct QEMUBuffer {
+    QEMUSizedBuffer *qsb;
+    QEMUFile *file;
+} QEMUBuffer;
+
+static int buf_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
+{
+    QEMUBuffer *s = opaque;
+    ssize_t len = qsb_get_length(s->qsb) - pos;
+
+    if (len <= 0) {
+        return 0;
+    }
+
+    if (len > size) {
+        len = size;
+    }
+    return qsb_get_buffer(s->qsb, pos, len, buf);
+}
+
+static int buf_put_buffer(void *opaque, const uint8_t *buf,
+                          int64_t pos, int size)
+{
+    QEMUBuffer *s = opaque;
+
+    return qsb_write_at(s->qsb, buf, pos, size);
+}
+
+static int buf_close(void *opaque)
+{
+    QEMUBuffer *s = opaque;
+
+    qsb_free(s->qsb);
+
+    g_free(s);
+
+    return 0;
+}
+
+const QEMUSizedBuffer *qemu_buf_get(QEMUFile *f)
+{
+    QEMUBuffer *p;
+
+    qemu_fflush(f);
+
+    p = f->opaque;
+
+    return p->qsb;
+}
+
+static const QEMUFileOps buf_read_ops = {
+    .get_buffer = buf_get_buffer,
+    .close =      buf_close,
+};
+
+static const QEMUFileOps buf_write_ops = {
+    .put_buffer = buf_put_buffer,
+    .close =      buf_close,
+};
+
+QEMUFile *qemu_bufopen(const char *mode, QEMUSizedBuffer *input)
+{
+    QEMUBuffer *s;
+
+    if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') ||
+        mode[1] != '\0') {
+        error_report("qemu_bufopen: Argument validity check failed");
+        return NULL;
+    }
+
+    s = g_malloc0(sizeof(QEMUBuffer));
+    if (mode[0] == 'r') {
+        s->qsb = input;
+    }
+
+    if (s->qsb == NULL) {
+        s->qsb = qsb_create(NULL, 0);
+    }
+    if (!s->qsb) {
+        g_free(s);
+        error_report("qemu_bufopen: qsb_create failed");
+        return NULL;
+    }
+
+    if (mode[0] == 'r') {
+        s->file = qemu_fopen_ops(s, &buf_read_ops);
+    } else {
+        s->file = qemu_fopen_ops(s, &buf_write_ops);
+    }
+    return s->file;
+}
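
The QEMUSizedBuffer machinery above gives a purely in-memory QEMUFile, which is
what the (now softmmu-only) vmstate tests lean on. A minimal sketch of the round
trip, with an arbitrary payload; the helper name is hypothetical:

#include <assert.h>
#include "migration/qemu-file.h"

/* Hypothetical example: write into a QSB-backed file, read it back. */
static void example_buf_roundtrip(void)
{
    QEMUFile *wr = qemu_bufopen("w", NULL);         /* allocates its own QSB */
    qemu_put_be32(wr, 42);

    const QEMUSizedBuffer *qsb = qemu_buf_get(wr);  /* flushes first */
    QEMUFile *rd = qemu_bufopen("r", qsb_clone(qsb));

    assert(qemu_get_be32(rd) == 42);
    qemu_fclose(rd);        /* buf_close() frees the clone */
    qemu_fclose(wr);        /* ...and this frees the original QSB */
}
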
diff --git a/migration/vmstate.c b/migration/vmstate.c
new file mode 100644 (file)
index 0000000..3dde574
--- /dev/null
@@ -0,0 +1,687 @@
+#include "qemu-common.h"
+#include "migration/migration.h"
+#include "migration/qemu-file.h"
+#include "migration/vmstate.h"
+#include "qemu/bitops.h"
+#include "trace.h"
+
+static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
+                                    void *opaque);
+static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
+                                   void *opaque);
+
+static int vmstate_n_elems(void *opaque, VMStateField *field)
+{
+    int n_elems = 1;
+
+    if (field->flags & VMS_ARRAY) {
+        n_elems = field->num;
+    } else if (field->flags & VMS_VARRAY_INT32) {
+        n_elems = *(int32_t *)(opaque+field->num_offset);
+    } else if (field->flags & VMS_VARRAY_UINT32) {
+        n_elems = *(uint32_t *)(opaque+field->num_offset);
+    } else if (field->flags & VMS_VARRAY_UINT16) {
+        n_elems = *(uint16_t *)(opaque+field->num_offset);
+    } else if (field->flags & VMS_VARRAY_UINT8) {
+        n_elems = *(uint8_t *)(opaque+field->num_offset);
+    }
+
+    return n_elems;
+}
+
+static int vmstate_size(void *opaque, VMStateField *field)
+{
+    int size = field->size;
+
+    if (field->flags & VMS_VBUFFER) {
+        size = *(int32_t *)(opaque+field->size_offset);
+        if (field->flags & VMS_MULTIPLY) {
+            size *= field->size;
+        }
+    }
+
+    return size;
+}
+
+static void *vmstate_base_addr(void *opaque, VMStateField *field, bool alloc)
+{
+    void *base_addr = opaque + field->offset;
+
+    if (field->flags & VMS_POINTER) {
+        if (alloc && (field->flags & VMS_ALLOC)) {
+            gsize size = 0;
+            if (field->flags & VMS_VBUFFER) {
+                size = vmstate_size(opaque, field);
+            } else {
+                int n_elems = vmstate_n_elems(opaque, field);
+                if (n_elems) {
+                    size = n_elems * field->size;
+                }
+            }
+            if (size) {
+                *((void **)base_addr + field->start) = g_malloc(size);
+            }
+        }
+        base_addr = *(void **)base_addr + field->start;
+    }
+
+    return base_addr;
+}
+
+int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
+                       void *opaque, int version_id)
+{
+    VMStateField *field = vmsd->fields;
+    int ret;
+
+    if (version_id > vmsd->version_id) {
+        return -EINVAL;
+    }
+    if (version_id < vmsd->minimum_version_id) {
+        if (vmsd->load_state_old &&
+            version_id >= vmsd->minimum_version_id_old) {
+            return vmsd->load_state_old(f, opaque, version_id);
+        }
+        return -EINVAL;
+    }
+    if (vmsd->pre_load) {
+        int ret = vmsd->pre_load(opaque);
+        if (ret) {
+            return ret;
+        }
+    }
+    while (field->name) {
+        if ((field->field_exists &&
+             field->field_exists(opaque, version_id)) ||
+            (!field->field_exists &&
+             field->version_id <= version_id)) {
+            void *base_addr = vmstate_base_addr(opaque, field, true);
+            int i, n_elems = vmstate_n_elems(opaque, field);
+            int size = vmstate_size(opaque, field);
+
+            for (i = 0; i < n_elems; i++) {
+                void *addr = base_addr + size * i;
+
+                if (field->flags & VMS_ARRAY_OF_POINTER) {
+                    addr = *(void **)addr;
+                }
+                if (field->flags & VMS_STRUCT) {
+                    ret = vmstate_load_state(f, field->vmsd, addr,
+                                             field->vmsd->version_id);
+                } else {
+                    ret = field->info->get(f, addr, size);
+
+                }
+                if (ret >= 0) {
+                    ret = qemu_file_get_error(f);
+                }
+                if (ret < 0) {
+                    qemu_file_set_error(f, ret);
+                    trace_vmstate_load_field_error(field->name, ret);
+                    return ret;
+                }
+            }
+        } else if (field->flags & VMS_MUST_EXIST) {
+            fprintf(stderr, "Input validation failed: %s/%s\n",
+                    vmsd->name, field->name);
+            return -1;
+        }
+        field++;
+    }
+    ret = vmstate_subsection_load(f, vmsd, opaque);
+    if (ret != 0) {
+        return ret;
+    }
+    if (vmsd->post_load) {
+        return vmsd->post_load(opaque, version_id);
+    }
+    return 0;
+}
+
+void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
+                        void *opaque)
+{
+    VMStateField *field = vmsd->fields;
+
+    if (vmsd->pre_save) {
+        vmsd->pre_save(opaque);
+    }
+    while (field->name) {
+        if (!field->field_exists ||
+            field->field_exists(opaque, vmsd->version_id)) {
+            void *base_addr = vmstate_base_addr(opaque, field, false);
+            int i, n_elems = vmstate_n_elems(opaque, field);
+            int size = vmstate_size(opaque, field);
+
+            for (i = 0; i < n_elems; i++) {
+                void *addr = base_addr + size * i;
+
+                if (field->flags & VMS_ARRAY_OF_POINTER) {
+                    addr = *(void **)addr;
+                }
+                if (field->flags & VMS_STRUCT) {
+                    vmstate_save_state(f, field->vmsd, addr);
+                } else {
+                    field->info->put(f, addr, size);
+                }
+            }
+        } else {
+            if (field->flags & VMS_MUST_EXIST) {
+                fprintf(stderr, "Output state validation failed: %s/%s\n",
+                        vmsd->name, field->name);
+                assert(!(field->flags & VMS_MUST_EXIST));
+            }
+        }
+        field++;
+    }
+    vmstate_subsection_save(f, vmsd, opaque);
+}
+
+static const VMStateDescription *
+    vmstate_get_subsection(const VMStateSubsection *sub, char *idstr)
+{
+    while (sub && sub->needed) {
+        if (strcmp(idstr, sub->vmsd->name) == 0) {
+            return sub->vmsd;
+        }
+        sub++;
+    }
+    return NULL;
+}
+
+static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
+                                   void *opaque)
+{
+    while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
+        char idstr[256];
+        int ret;
+        uint8_t version_id, len, size;
+        const VMStateDescription *sub_vmsd;
+
+        len = qemu_peek_byte(f, 1);
+        if (len < strlen(vmsd->name) + 1) {
+            /* subsection name has to be "section_name/a" */
+            return 0;
+        }
+        size = qemu_peek_buffer(f, (uint8_t *)idstr, len, 2);
+        if (size != len) {
+            return 0;
+        }
+        idstr[size] = 0;
+
+        if (strncmp(vmsd->name, idstr, strlen(vmsd->name)) != 0) {
+            /* it doesn't have a valid subsection name */
+            return 0;
+        }
+        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
+        if (sub_vmsd == NULL) {
+            return -ENOENT;
+        }
+        qemu_file_skip(f, 1); /* subsection */
+        qemu_file_skip(f, 1); /* len */
+        qemu_file_skip(f, len); /* idstr */
+        version_id = qemu_get_be32(f);
+
+        ret = vmstate_load_state(f, sub_vmsd, opaque, version_id);
+        if (ret) {
+            return ret;
+        }
+    }
+    return 0;
+}
+
+static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
+                                    void *opaque)
+{
+    const VMStateSubsection *sub = vmsd->subsections;
+
+    while (sub && sub->needed) {
+        if (sub->needed(opaque)) {
+            const VMStateDescription *vmsd = sub->vmsd;
+            uint8_t len;
+
+            qemu_put_byte(f, QEMU_VM_SUBSECTION);
+            len = strlen(vmsd->name);
+            qemu_put_byte(f, len);
+            qemu_put_buffer(f, (uint8_t *)vmsd->name, len);
+            qemu_put_be32(f, vmsd->version_id);
+            vmstate_save_state(f, vmsd, opaque);
+        }
+        sub++;
+    }
+}
+
+/* bool */
+
+static int get_bool(QEMUFile *f, void *pv, size_t size)
+{
+    bool *v = pv;
+    *v = qemu_get_byte(f);
+    return 0;
+}
+
+static void put_bool(QEMUFile *f, void *pv, size_t size)
+{
+    bool *v = pv;
+    qemu_put_byte(f, *v);
+}
+
+const VMStateInfo vmstate_info_bool = {
+    .name = "bool",
+    .get  = get_bool,
+    .put  = put_bool,
+};
+
+/* 8 bit int */
+
+static int get_int8(QEMUFile *f, void *pv, size_t size)
+{
+    int8_t *v = pv;
+    qemu_get_s8s(f, v);
+    return 0;
+}
+
+static void put_int8(QEMUFile *f, void *pv, size_t size)
+{
+    int8_t *v = pv;
+    qemu_put_s8s(f, v);
+}
+
+const VMStateInfo vmstate_info_int8 = {
+    .name = "int8",
+    .get  = get_int8,
+    .put  = put_int8,
+};
+
+/* 16 bit int */
+
+static int get_int16(QEMUFile *f, void *pv, size_t size)
+{
+    int16_t *v = pv;
+    qemu_get_sbe16s(f, v);
+    return 0;
+}
+
+static void put_int16(QEMUFile *f, void *pv, size_t size)
+{
+    int16_t *v = pv;
+    qemu_put_sbe16s(f, v);
+}
+
+const VMStateInfo vmstate_info_int16 = {
+    .name = "int16",
+    .get  = get_int16,
+    .put  = put_int16,
+};
+
+/* 32 bit int */
+
+static int get_int32(QEMUFile *f, void *pv, size_t size)
+{
+    int32_t *v = pv;
+    qemu_get_sbe32s(f, v);
+    return 0;
+}
+
+static void put_int32(QEMUFile *f, void *pv, size_t size)
+{
+    int32_t *v = pv;
+    qemu_put_sbe32s(f, v);
+}
+
+const VMStateInfo vmstate_info_int32 = {
+    .name = "int32",
+    .get  = get_int32,
+    .put  = put_int32,
+};
+
+/* 32 bit int. Check that the received value is the same as the one
+   in the field */
+
+static int get_int32_equal(QEMUFile *f, void *pv, size_t size)
+{
+    int32_t *v = pv;
+    int32_t v2;
+    qemu_get_sbe32s(f, &v2);
+
+    if (*v == v2) {
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_int32_equal = {
+    .name = "int32 equal",
+    .get  = get_int32_equal,
+    .put  = put_int32,
+};
+
+/* 32 bit int. Check that the received value is non-negative
+ * and less than or equal to the one in the field.
+ */
+
+static int get_int32_le(QEMUFile *f, void *pv, size_t size)
+{
+    int32_t *cur = pv;
+    int32_t loaded;
+    qemu_get_sbe32s(f, &loaded);
+
+    if (loaded >= 0 && loaded <= *cur) {
+        *cur = loaded;
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_int32_le = {
+    .name = "int32 le",
+    .get  = get_int32_le,
+    .put  = put_int32,
+};
+
+/* 64 bit int */
+
+static int get_int64(QEMUFile *f, void *pv, size_t size)
+{
+    int64_t *v = pv;
+    qemu_get_sbe64s(f, v);
+    return 0;
+}
+
+static void put_int64(QEMUFile *f, void *pv, size_t size)
+{
+    int64_t *v = pv;
+    qemu_put_sbe64s(f, v);
+}
+
+const VMStateInfo vmstate_info_int64 = {
+    .name = "int64",
+    .get  = get_int64,
+    .put  = put_int64,
+};
+
+/* 8 bit unsigned int */
+
+static int get_uint8(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t *v = pv;
+    qemu_get_8s(f, v);
+    return 0;
+}
+
+static void put_uint8(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t *v = pv;
+    qemu_put_8s(f, v);
+}
+
+const VMStateInfo vmstate_info_uint8 = {
+    .name = "uint8",
+    .get  = get_uint8,
+    .put  = put_uint8,
+};
+
+/* 16 bit unsigned int */
+
+static int get_uint16(QEMUFile *f, void *pv, size_t size)
+{
+    uint16_t *v = pv;
+    qemu_get_be16s(f, v);
+    return 0;
+}
+
+static void put_uint16(QEMUFile *f, void *pv, size_t size)
+{
+    uint16_t *v = pv;
+    qemu_put_be16s(f, v);
+}
+
+const VMStateInfo vmstate_info_uint16 = {
+    .name = "uint16",
+    .get  = get_uint16,
+    .put  = put_uint16,
+};
+
+/* 32 bit unsigned int */
+
+static int get_uint32(QEMUFile *f, void *pv, size_t size)
+{
+    uint32_t *v = pv;
+    qemu_get_be32s(f, v);
+    return 0;
+}
+
+static void put_uint32(QEMUFile *f, void *pv, size_t size)
+{
+    uint32_t *v = pv;
+    qemu_put_be32s(f, v);
+}
+
+const VMStateInfo vmstate_info_uint32 = {
+    .name = "uint32",
+    .get  = get_uint32,
+    .put  = put_uint32,
+};
+
+/* 32 bit uint. Check that the received value is the same as the one
+   in the field */
+
+static int get_uint32_equal(QEMUFile *f, void *pv, size_t size)
+{
+    uint32_t *v = pv;
+    uint32_t v2;
+    qemu_get_be32s(f, &v2);
+
+    if (*v == v2) {
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_uint32_equal = {
+    .name = "uint32 equal",
+    .get  = get_uint32_equal,
+    .put  = put_uint32,
+};
+
+/* 64 bit unsigned int */
+
+static int get_uint64(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+    qemu_get_be64s(f, v);
+    return 0;
+}
+
+static void put_uint64(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+    qemu_put_be64s(f, v);
+}
+
+const VMStateInfo vmstate_info_uint64 = {
+    .name = "uint64",
+    .get  = get_uint64,
+    .put  = put_uint64,
+};
+
+/* 64 bit unsigned int. Check that the received value is the same as
+   the one in the field */
+
+static int get_uint64_equal(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+    uint64_t v2;
+    qemu_get_be64s(f, &v2);
+
+    if (*v == v2) {
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_uint64_equal = {
+    .name = "int64 equal",
+    .get  = get_uint64_equal,
+    .put  = put_uint64,
+};
+
+/* 8 bit unsigned int. Check that the received value is the same as
+   the one in the field */
+
+static int get_uint8_equal(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t *v = pv;
+    uint8_t v2;
+    qemu_get_8s(f, &v2);
+
+    if (*v == v2) {
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_uint8_equal = {
+    .name = "uint8 equal",
+    .get  = get_uint8_equal,
+    .put  = put_uint8,
+};
+
+/* 16 bit unsigned int. Check that the received value is the same as
+   the one in the field */
+
+static int get_uint16_equal(QEMUFile *f, void *pv, size_t size)
+{
+    uint16_t *v = pv;
+    uint16_t v2;
+    qemu_get_be16s(f, &v2);
+
+    if (*v == v2) {
+        return 0;
+    }
+    return -EINVAL;
+}
+
+const VMStateInfo vmstate_info_uint16_equal = {
+    .name = "uint16 equal",
+    .get  = get_uint16_equal,
+    .put  = put_uint16,
+};
+
+/* floating point */
+
+static int get_float64(QEMUFile *f, void *pv, size_t size)
+{
+    float64 *v = pv;
+
+    *v = make_float64(qemu_get_be64(f));
+    return 0;
+}
+
+static void put_float64(QEMUFile *f, void *pv, size_t size)
+{
+    uint64_t *v = pv;
+
+    qemu_put_be64(f, float64_val(*v));
+}
+
+const VMStateInfo vmstate_info_float64 = {
+    .name = "float64",
+    .get  = get_float64,
+    .put  = put_float64,
+};
+
+/* uint8_t buffers */
+
+static int get_buffer(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t *v = pv;
+    qemu_get_buffer(f, v, size);
+    return 0;
+}
+
+static void put_buffer(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t *v = pv;
+    qemu_put_buffer(f, v, size);
+}
+
+const VMStateInfo vmstate_info_buffer = {
+    .name = "buffer",
+    .get  = get_buffer,
+    .put  = put_buffer,
+};
+
+/* unused buffers: space that was used by fields that are no
+   longer useful */
+
+static int get_unused_buffer(QEMUFile *f, void *pv, size_t size)
+{
+    uint8_t buf[1024];
+    int block_len;
+
+    while (size > 0) {
+        block_len = MIN(sizeof(buf), size);
+        size -= block_len;
+        qemu_get_buffer(f, buf, block_len);
+    }
+    return 0;
+}
+
+static void put_unused_buffer(QEMUFile *f, void *pv, size_t size)
+{
+    static const uint8_t buf[1024];
+    int block_len;
+
+    while (size > 0) {
+        block_len = MIN(sizeof(buf), size);
+        size -= block_len;
+        qemu_put_buffer(f, buf, block_len);
+    }
+}
+
+const VMStateInfo vmstate_info_unused_buffer = {
+    .name = "unused_buffer",
+    .get  = get_unused_buffer,
+    .put  = put_unused_buffer,
+};
+
+/* bitmaps (as defined by bitmap.h). Note that size here is the size
+ * of the bitmap in bits. The on-the-wire format of a bitmap is 64
+ * bit words with the bits in big endian order. The in-memory format
+ * is an array of 'unsigned long', which may be either 32 or 64 bits.
+ */
+/* This is the number of 64 bit words sent over the wire */
+#define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, 64)
+static int get_bitmap(QEMUFile *f, void *pv, size_t size)
+{
+    unsigned long *bmp = pv;
+    int i, idx = 0;
+    for (i = 0; i < BITS_TO_U64S(size); i++) {
+        uint64_t w = qemu_get_be64(f);
+        bmp[idx++] = w;
+        if (sizeof(unsigned long) == 4 && idx < BITS_TO_LONGS(size)) {
+            bmp[idx++] = w >> 32;
+        }
+    }
+    return 0;
+}
+
+static void put_bitmap(QEMUFile *f, void *pv, size_t size)
+{
+    unsigned long *bmp = pv;
+    int i, idx = 0;
+    for (i = 0; i < BITS_TO_U64S(size); i++) {
+        uint64_t w = bmp[idx++];
+        if (sizeof(unsigned long) == 4 && idx < BITS_TO_LONGS(size)) {
+            w |= ((uint64_t)bmp[idx++]) << 32;
+        }
+        qemu_put_be64(f, w);
+    }
+}
+
+const VMStateInfo vmstate_info_bitmap = {
+    .name = "bitmap",
+    .get = get_bitmap,
+    .put = put_bitmap,
+};
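
The VMStateInfo tables above are normally reached through a VMStateDescription
rather than called directly. A minimal sketch, assuming the VMSTATE_* field
macros from migration/vmstate.h; the device and its fields are hypothetical:

#include "migration/vmstate.h"

typedef struct ExampleState {
    uint32_t level;
    int64_t deadline;
} ExampleState;

static const VMStateDescription vmstate_example = {
    .name = "example-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, ExampleState),    /* -> vmstate_info_uint32 */
        VMSTATE_INT64(deadline, ExampleState),  /* -> vmstate_info_int64 */
        VMSTATE_END_OF_LIST()
    }
};

/*
 * Saving and loading then reduce to:
 *     vmstate_save_state(f, &vmstate_example, &state);
 *     vmstate_load_state(f, &vmstate_example, &state, 1);
 */
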
diff --git a/migration/xbzrle.c b/migration/xbzrle.c
new file mode 100644 (file)
index 0000000..8e220bf
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Xor Based Zero Run Length Encoding
+ *
+ * Copyright 2013 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Orit Wasserman  <owasserm@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu-common.h"
+#include "include/migration/migration.h"
+
+/*
+  page = zrun nzrun
+       | zrun nzrun page
+
+  zrun = length
+
+  nzrun = length byte...
+
+  length = uleb128 encoded integer
+ */
+int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
+                         uint8_t *dst, int dlen)
+{
+    uint32_t zrun_len = 0, nzrun_len = 0;
+    int d = 0, i = 0;
+    long res;
+    uint8_t *nzrun_start = NULL;
+
+    g_assert(!(((uintptr_t)old_buf | (uintptr_t)new_buf | slen) %
+               sizeof(long)));
+
+    while (i < slen) {
+        /* overflow */
+        if (d + 2 > dlen) {
+            return -1;
+        }
+
+        /* not aligned to sizeof(long) */
+        res = (slen - i) % sizeof(long);
+        while (res && old_buf[i] == new_buf[i]) {
+            zrun_len++;
+            i++;
+            res--;
+        }
+
+        /* word at a time for speed */
+        if (!res) {
+            while (i < slen &&
+                   (*(long *)(old_buf + i)) == (*(long *)(new_buf + i))) {
+                i += sizeof(long);
+                zrun_len += sizeof(long);
+            }
+
+            /* go over the rest */
+            while (i < slen && old_buf[i] == new_buf[i]) {
+                zrun_len++;
+                i++;
+            }
+        }
+
+        /* buffer unchanged */
+        if (zrun_len == slen) {
+            return 0;
+        }
+
+        /* skip last zero run */
+        if (i == slen) {
+            return d;
+        }
+
+        d += uleb128_encode_small(dst + d, zrun_len);
+
+        zrun_len = 0;
+        nzrun_start = new_buf + i;
+
+        /* overflow */
+        if (d + 2 > dlen) {
+            return -1;
+        }
+        /* not aligned to sizeof(long) */
+        res = (slen - i) % sizeof(long);
+        while (res && old_buf[i] != new_buf[i]) {
+            i++;
+            nzrun_len++;
+            res--;
+        }
+
+        /* word at a time for speed, use of 32-bit long okay */
+        if (!res) {
+            /* truncation to 32-bit long okay */
+            unsigned long mask = (unsigned long)0x0101010101010101ULL;
+            while (i < slen) {
+                unsigned long xor;
+                xor = *(unsigned long *)(old_buf + i)
+                    ^ *(unsigned long *)(new_buf + i);
+                if ((xor - mask) & ~xor & (mask << 7)) {
+                    /* found the end of an nzrun within the current long */
+                    while (old_buf[i] != new_buf[i]) {
+                        nzrun_len++;
+                        i++;
+                    }
+                    break;
+                } else {
+                    i += sizeof(long);
+                    nzrun_len += sizeof(long);
+                }
+            }
+        }
+
+        d += uleb128_encode_small(dst + d, nzrun_len);
+        /* overflow */
+        if (d + nzrun_len > dlen) {
+            return -1;
+        }
+        memcpy(dst + d, nzrun_start, nzrun_len);
+        d += nzrun_len;
+        nzrun_len = 0;
+    }
+
+    return d;
+}
+
+int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen)
+{
+    int i = 0, d = 0;
+    int ret;
+    uint32_t count = 0;
+
+    while (i < slen) {
+
+        /* zrun */
+        if ((slen - i) < 2) {
+            return -1;
+        }
+
+        ret = uleb128_decode_small(src + i, &count);
+        if (ret < 0 || (i && !count)) {
+            return -1;
+        }
+        i += ret;
+        d += count;
+
+        /* overflow */
+        if (d > dlen) {
+            return -1;
+        }
+
+        /* nzrun */
+        if ((slen - i) < 2) {
+            return -1;
+        }
+
+        ret = uleb128_decode_small(src + i, &count);
+        if (ret < 0 || !count) {
+            return -1;
+        }
+        i += ret;
+
+        /* overflow */
+        if (d + count > dlen || i + count > slen) {
+            return -1;
+        }
+
+        memcpy(dst + d, src + i, count);
+        d += count;
+        i += count;
+    }
+
+    return d;
+}
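
The encoder's contract is easiest to see as a round trip: encode the difference
between two versions of a page, then patch a copy of the old page with the
result. A sketch under made-up sizes; g_malloc() memory satisfies the
long-alignment g_assert() above:

#include <assert.h>
#include <string.h>
#include "qemu-common.h"
#include "migration/migration.h"

#define EXAMPLE_PAGE 4096   /* arbitrary; must be a multiple of sizeof(long) */

/* Hypothetical example: encode one dirty byte, decode, compare. */
static void example_xbzrle_roundtrip(void)
{
    uint8_t *old_page = g_malloc0(EXAMPLE_PAGE);
    uint8_t *new_page = g_memdup(old_page, EXAMPLE_PAGE);
    uint8_t *enc = g_malloc(EXAMPLE_PAGE);
    uint8_t *dst = g_memdup(old_page, EXAMPLE_PAGE);
    int elen;

    new_page[100] = 0xab;   /* dirty a single byte */

    elen = xbzrle_encode_buffer(old_page, new_page, EXAMPLE_PAGE,
                                enc, EXAMPLE_PAGE);
    if (elen > 0) {         /* -1: didn't fit; 0: page unchanged */
        xbzrle_decode_buffer(enc, elen, dst, EXAMPLE_PAGE);
        assert(memcmp(dst, new_page, EXAMPLE_PAGE) == 0);
    }

    g_free(old_page);
    g_free(new_page);
    g_free(enc);
    g_free(dst);
}
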
diff --git a/qemu-file-stdio.c b/qemu-file-stdio.c
deleted file mode 100644 (file)
index 285068b..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu-common.h"
-#include "block/coroutine.h"
-#include "migration/qemu-file.h"
-
-typedef struct QEMUFileStdio {
-    FILE *stdio_file;
-    QEMUFile *file;
-} QEMUFileStdio;
-
-static int stdio_get_fd(void *opaque)
-{
-    QEMUFileStdio *s = opaque;
-
-    return fileno(s->stdio_file);
-}
-
-static int stdio_put_buffer(void *opaque, const uint8_t *buf, int64_t pos,
-                            int size)
-{
-    QEMUFileStdio *s = opaque;
-    int res;
-
-    res = fwrite(buf, 1, size, s->stdio_file);
-
-    if (res != size) {
-        return -errno;
-    }
-    return res;
-}
-
-static int stdio_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
-{
-    QEMUFileStdio *s = opaque;
-    FILE *fp = s->stdio_file;
-    int bytes;
-
-    for (;;) {
-        clearerr(fp);
-        bytes = fread(buf, 1, size, fp);
-        if (bytes != 0 || !ferror(fp)) {
-            break;
-        }
-        if (errno == EAGAIN) {
-            yield_until_fd_readable(fileno(fp));
-        } else if (errno != EINTR) {
-            break;
-        }
-    }
-    return bytes;
-}
-
-static int stdio_pclose(void *opaque)
-{
-    QEMUFileStdio *s = opaque;
-    int ret;
-    ret = pclose(s->stdio_file);
-    if (ret == -1) {
-        ret = -errno;
-    } else if (!WIFEXITED(ret) || WEXITSTATUS(ret) != 0) {
-        /* close succeeded, but non-zero exit code: */
-        ret = -EIO; /* fake errno value */
-    }
-    g_free(s);
-    return ret;
-}
-
-static int stdio_fclose(void *opaque)
-{
-    QEMUFileStdio *s = opaque;
-    int ret = 0;
-
-    if (qemu_file_is_writable(s->file)) {
-        int fd = fileno(s->stdio_file);
-        struct stat st;
-
-        ret = fstat(fd, &st);
-        if (ret == 0 && S_ISREG(st.st_mode)) {
-            /*
-             * If the file handle is a regular file make sure the
-             * data is flushed to disk before signaling success.
-             */
-            ret = fsync(fd);
-            if (ret != 0) {
-                ret = -errno;
-                return ret;
-            }
-        }
-    }
-    if (fclose(s->stdio_file) == EOF) {
-        ret = -errno;
-    }
-    g_free(s);
-    return ret;
-}
-
-static const QEMUFileOps stdio_pipe_read_ops = {
-    .get_fd =     stdio_get_fd,
-    .get_buffer = stdio_get_buffer,
-    .close =      stdio_pclose
-};
-
-static const QEMUFileOps stdio_pipe_write_ops = {
-    .get_fd =     stdio_get_fd,
-    .put_buffer = stdio_put_buffer,
-    .close =      stdio_pclose
-};
-
-QEMUFile *qemu_popen_cmd(const char *command, const char *mode)
-{
-    FILE *stdio_file;
-    QEMUFileStdio *s;
-
-    if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') || mode[1] != 0) {
-        fprintf(stderr, "qemu_popen: Argument validity check failed\n");
-        return NULL;
-    }
-
-    stdio_file = popen(command, mode);
-    if (stdio_file == NULL) {
-        return NULL;
-    }
-
-    s = g_malloc0(sizeof(QEMUFileStdio));
-
-    s->stdio_file = stdio_file;
-
-    if (mode[0] == 'r') {
-        s->file = qemu_fopen_ops(s, &stdio_pipe_read_ops);
-    } else {
-        s->file = qemu_fopen_ops(s, &stdio_pipe_write_ops);
-    }
-    return s->file;
-}
-
-static const QEMUFileOps stdio_file_read_ops = {
-    .get_fd =     stdio_get_fd,
-    .get_buffer = stdio_get_buffer,
-    .close =      stdio_fclose
-};
-
-static const QEMUFileOps stdio_file_write_ops = {
-    .get_fd =     stdio_get_fd,
-    .put_buffer = stdio_put_buffer,
-    .close =      stdio_fclose
-};
-
-QEMUFile *qemu_fopen(const char *filename, const char *mode)
-{
-    QEMUFileStdio *s;
-
-    if (qemu_file_mode_is_not_valid(mode)) {
-        return NULL;
-    }
-
-    s = g_malloc0(sizeof(QEMUFileStdio));
-
-    s->stdio_file = fopen(filename, mode);
-    if (!s->stdio_file) {
-        goto fail;
-    }
-
-    if (mode[0] == 'w') {
-        s->file = qemu_fopen_ops(s, &stdio_file_write_ops);
-    } else {
-        s->file = qemu_fopen_ops(s, &stdio_file_read_ops);
-    }
-    return s->file;
-fail:
-    g_free(s);
-    return NULL;
-}
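
The stdio backend above is the simplest illustration of the QEMUFile backend pattern: an opaque state struct plus a QEMUFileOps table handed to qemu_fopen_ops(). As a hedged sketch (not part of the tree; CountingFile and all function names here are illustrative), a hypothetical write-only backend that discards data and merely counts bytes could look like:

/* Minimal sketch of a custom QEMUFile backend, assuming the
 * QEMUFileOps callbacks shown in this commit. */
typedef struct CountingFile {
    uint64_t written;   /* bytes "written" so far */
    QEMUFile *file;
} CountingFile;

static int counting_put_buffer(void *opaque, const uint8_t *buf,
                               int64_t pos, int size)
{
    CountingFile *s = opaque;   /* opaque is the backend state */

    s->written += size;         /* discard the data, keep the count */
    return size;                /* report the whole buffer as written */
}

static int counting_close(void *opaque)
{
    g_free(opaque);
    return 0;
}

static const QEMUFileOps counting_write_ops = {
    .put_buffer = counting_put_buffer,
    .close      = counting_close,
};

static QEMUFile *qemu_fopen_counting(void)
{
    CountingFile *s = g_malloc0(sizeof(*s));

    s->file = qemu_fopen_ops(s, &counting_write_ops);
    return s->file;
}
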
diff --git a/qemu-file-unix.c b/qemu-file-unix.c
deleted file mode 100644 (file)
index 9682396..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu-common.h"
-#include "qemu/iov.h"
-#include "qemu/sockets.h"
-#include "block/coroutine.h"
-#include "migration/qemu-file.h"
-
-typedef struct QEMUFileSocket {
-    int fd;
-    QEMUFile *file;
-} QEMUFileSocket;
-
-static ssize_t socket_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
-                                    int64_t pos)
-{
-    QEMUFileSocket *s = opaque;
-    ssize_t len;
-    ssize_t size = iov_size(iov, iovcnt);
-
-    len = iov_send(s->fd, iov, iovcnt, 0, size);
-    if (len < size) {
-        len = -socket_error();
-    }
-    return len;
-}
-
-static int socket_get_fd(void *opaque)
-{
-    QEMUFileSocket *s = opaque;
-
-    return s->fd;
-}
-
-static int socket_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
-{
-    QEMUFileSocket *s = opaque;
-    ssize_t len;
-
-    for (;;) {
-        len = qemu_recv(s->fd, buf, size, 0);
-        if (len != -1) {
-            break;
-        }
-        if (socket_error() == EAGAIN) {
-            yield_until_fd_readable(s->fd);
-        } else if (socket_error() != EINTR) {
-            break;
-        }
-    }
-
-    if (len == -1) {
-        len = -socket_error();
-    }
-    return len;
-}
-
-static int socket_close(void *opaque)
-{
-    QEMUFileSocket *s = opaque;
-    closesocket(s->fd);
-    g_free(s);
-    return 0;
-}
-
-static ssize_t unix_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
-                                  int64_t pos)
-{
-    QEMUFileSocket *s = opaque;
-    ssize_t len, offset;
-    ssize_t size = iov_size(iov, iovcnt);
-    ssize_t total = 0;
-
-    assert(iovcnt > 0);
-    offset = 0;
-    while (size > 0) {
-        /* Find the next start position; skip all full-sized vector elements  */
-        while (offset >= iov[0].iov_len) {
-            offset -= iov[0].iov_len;
-            iov++, iovcnt--;
-        }
-
-        /* skip `offset' bytes from the (now) first element, undo it on exit */
-        assert(iovcnt > 0);
-        iov[0].iov_base += offset;
-        iov[0].iov_len -= offset;
-
-        do {
-            len = writev(s->fd, iov, iovcnt);
-        } while (len == -1 && errno == EINTR);
-        if (len == -1) {
-            return -errno;
-        }
-
-        /* Undo the changes above */
-        iov[0].iov_base -= offset;
-        iov[0].iov_len += offset;
-
-        /* Prepare for the next iteration */
-        offset += len;
-        total += len;
-        size -= len;
-    }
-
-    return total;
-}
-
-static int unix_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
-{
-    QEMUFileSocket *s = opaque;
-    ssize_t len;
-
-    for (;;) {
-        len = read(s->fd, buf, size);
-        if (len != -1) {
-            break;
-        }
-        if (errno == EAGAIN) {
-            yield_until_fd_readable(s->fd);
-        } else if (errno != EINTR) {
-            break;
-        }
-    }
-
-    if (len == -1) {
-        len = -errno;
-    }
-    return len;
-}
-
-static int unix_close(void *opaque)
-{
-    QEMUFileSocket *s = opaque;
-    close(s->fd);
-    g_free(s);
-    return 0;
-}
-
-static const QEMUFileOps unix_read_ops = {
-    .get_fd =     socket_get_fd,
-    .get_buffer = unix_get_buffer,
-    .close =      unix_close
-};
-
-static const QEMUFileOps unix_write_ops = {
-    .get_fd =     socket_get_fd,
-    .writev_buffer = unix_writev_buffer,
-    .close =      unix_close
-};
-
-QEMUFile *qemu_fdopen(int fd, const char *mode)
-{
-    QEMUFileSocket *s;
-
-    if (mode == NULL ||
-        (mode[0] != 'r' && mode[0] != 'w') ||
-        mode[1] != 'b' || mode[2] != 0) {
-        fprintf(stderr, "qemu_fdopen: Argument validity check failed\n");
-        return NULL;
-    }
-
-    s = g_malloc0(sizeof(QEMUFileSocket));
-    s->fd = fd;
-
-    if (mode[0] == 'r') {
-        s->file = qemu_fopen_ops(s, &unix_read_ops);
-    } else {
-        s->file = qemu_fopen_ops(s, &unix_write_ops);
-    }
-    return s->file;
-}
-
-static const QEMUFileOps socket_read_ops = {
-    .get_fd =     socket_get_fd,
-    .get_buffer = socket_get_buffer,
-    .close =      socket_close
-};
-
-static const QEMUFileOps socket_write_ops = {
-    .get_fd =     socket_get_fd,
-    .writev_buffer = socket_writev_buffer,
-    .close =      socket_close
-};
-
-QEMUFile *qemu_fopen_socket(int fd, const char *mode)
-{
-    QEMUFileSocket *s;
-
-    if (qemu_file_mode_is_not_valid(mode)) {
-        return NULL;
-    }
-
-    s = g_malloc0(sizeof(QEMUFileSocket));
-    s->fd = fd;
-    if (mode[0] == 'w') {
-        qemu_set_block(s->fd);
-        s->file = qemu_fopen_ops(s, &socket_write_ops);
-    } else {
-        s->file = qemu_fopen_ops(s, &socket_read_ops);
-    }
-    return s->file;
-}
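
For reference, a hedged usage sketch for the socket wrapper above (send_greeting and the payload are illustrative; it assumes the caller owns an already-connected socket fd): writes go through the usual qemu_put_* helpers, and qemu_fclose() flushes, then closes the fd via socket_close().

/* Illustrative only: push a few bytes through qemu_fopen_socket(). */
static int send_greeting(int fd)
{
    QEMUFile *f = qemu_fopen_socket(fd, "wb");  /* blocking write side */

    if (!f) {
        return -1;
    }
    qemu_put_be32(f, 0xcafe);                   /* arbitrary payload */
    qemu_put_buffer(f, (const uint8_t *)"hi", 2);
    return qemu_fclose(f);                      /* flush + close the fd */
}
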
diff --git a/qemu-file.c b/qemu-file.c
deleted file mode 100644 (file)
index f938e36..0000000
+++ /dev/null
@@ -1,995 +0,0 @@
-/*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu-common.h"
-#include "qemu/iov.h"
-#include "qemu/sockets.h"
-#include "block/coroutine.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "trace.h"
-
-#define IO_BUF_SIZE 32768
-#define MAX_IOV_SIZE MIN(IOV_MAX, 64)
-
-struct QEMUFile {
-    const QEMUFileOps *ops;
-    void *opaque;
-
-    int64_t bytes_xfer;
-    int64_t xfer_limit;
-
-    int64_t pos; /* start of buffer when writing, end of buffer
-                    when reading */
-    int buf_index;
-    int buf_size; /* 0 when writing */
-    uint8_t buf[IO_BUF_SIZE];
-
-    struct iovec iov[MAX_IOV_SIZE];
-    unsigned int iovcnt;
-
-    int last_error;
-};
-
-bool qemu_file_mode_is_not_valid(const char *mode)
-{
-    if (mode == NULL ||
-        (mode[0] != 'r' && mode[0] != 'w') ||
-        mode[1] != 'b' || mode[2] != 0) {
-        fprintf(stderr, "qemu_fopen: Argument validity check failed\n");
-        return true;
-    }
-
-    return false;
-}
-
-QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops)
-{
-    QEMUFile *f;
-
-    f = g_malloc0(sizeof(QEMUFile));
-
-    f->opaque = opaque;
-    f->ops = ops;
-    return f;
-}
-
-/*
- * Get last error for stream f
- *
- * Returns a negative error value if there has been an error on previous
- * operations, or 0 if no error happened.
- *
- */
-int qemu_file_get_error(QEMUFile *f)
-{
-    return f->last_error;
-}
-
-void qemu_file_set_error(QEMUFile *f, int ret)
-{
-    if (f->last_error == 0) {
-        f->last_error = ret;
-    }
-}
-
-bool qemu_file_is_writable(QEMUFile *f)
-{
-    return f->ops->writev_buffer || f->ops->put_buffer;
-}
-
-/**
- * Flushes QEMUFile buffer
- *
- * If the QEMUFileOps provide a writev_buffer callback it is used,
- * otherwise the put_buffer callback is used.
- */
-void qemu_fflush(QEMUFile *f)
-{
-    ssize_t ret = 0;
-
-    if (!qemu_file_is_writable(f)) {
-        return;
-    }
-
-    if (f->ops->writev_buffer) {
-        if (f->iovcnt > 0) {
-            ret = f->ops->writev_buffer(f->opaque, f->iov, f->iovcnt, f->pos);
-        }
-    } else {
-        if (f->buf_index > 0) {
-            ret = f->ops->put_buffer(f->opaque, f->buf, f->pos, f->buf_index);
-        }
-    }
-    if (ret >= 0) {
-        f->pos += ret;
-    }
-    f->buf_index = 0;
-    f->iovcnt = 0;
-    if (ret < 0) {
-        qemu_file_set_error(f, ret);
-    }
-}
-
-void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
-{
-    int ret = 0;
-
-    if (f->ops->before_ram_iterate) {
-        ret = f->ops->before_ram_iterate(f, f->opaque, flags);
-        if (ret < 0) {
-            qemu_file_set_error(f, ret);
-        }
-    }
-}
-
-void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
-{
-    int ret = 0;
-
-    if (f->ops->after_ram_iterate) {
-        ret = f->ops->after_ram_iterate(f, f->opaque, flags);
-        if (ret < 0) {
-            qemu_file_set_error(f, ret);
-        }
-    }
-}
-
-void ram_control_load_hook(QEMUFile *f, uint64_t flags)
-{
-    int ret = -EINVAL;
-
-    if (f->ops->hook_ram_load) {
-        ret = f->ops->hook_ram_load(f, f->opaque, flags);
-        if (ret < 0) {
-            qemu_file_set_error(f, ret);
-        }
-    } else {
-        qemu_file_set_error(f, ret);
-    }
-}
-
-size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
-                         ram_addr_t offset, size_t size, int *bytes_sent)
-{
-    if (f->ops->save_page) {
-        int ret = f->ops->save_page(f, f->opaque, block_offset,
-                                    offset, size, bytes_sent);
-
-        if (ret != RAM_SAVE_CONTROL_DELAYED) {
-            if (bytes_sent && *bytes_sent > 0) {
-                qemu_update_position(f, *bytes_sent);
-            } else if (ret < 0) {
-                qemu_file_set_error(f, ret);
-            }
-        }
-
-        return ret;
-    }
-
-    return RAM_SAVE_CONTROL_NOT_SUPP;
-}
-
-/*
- * Attempt to fill the buffer from the underlying file
- * Returns the number of bytes read, or negative value for an error.
- *
- * Note that it can return a partially full buffer even in a non-error/non-EOF
- * case if the underlying file descriptor gives a short read, and that can
- * happen even on a blocking fd.
- */
-static ssize_t qemu_fill_buffer(QEMUFile *f)
-{
-    int len;
-    int pending;
-
-    assert(!qemu_file_is_writable(f));
-
-    pending = f->buf_size - f->buf_index;
-    if (pending > 0) {
-        memmove(f->buf, f->buf + f->buf_index, pending);
-    }
-    f->buf_index = 0;
-    f->buf_size = pending;
-
-    len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos,
-                        IO_BUF_SIZE - pending);
-    if (len > 0) {
-        f->buf_size += len;
-        f->pos += len;
-    } else if (len == 0) {
-        qemu_file_set_error(f, -EIO);
-    } else if (len != -EAGAIN) {
-        qemu_file_set_error(f, len);
-    }
-
-    return len;
-}
-
-int qemu_get_fd(QEMUFile *f)
-{
-    if (f->ops->get_fd) {
-        return f->ops->get_fd(f->opaque);
-    }
-    return -1;
-}
-
-void qemu_update_position(QEMUFile *f, size_t size)
-{
-    f->pos += size;
-}
-
-/** Closes the file
- *
- * Returns a negative error value if any error happened on previous operations
- * or while closing the file. Returns 0 or a positive number on success.
- *
- * The meaning of return value on success depends on the specific backend
- * being used.
- */
-int qemu_fclose(QEMUFile *f)
-{
-    int ret;
-    qemu_fflush(f);
-    ret = qemu_file_get_error(f);
-
-    if (f->ops->close) {
-        int ret2 = f->ops->close(f->opaque);
-        if (ret >= 0) {
-            ret = ret2;
-        }
-    }
-    /* If any error was spotted before closing, we should report it
-     * instead of the close() return value.
-     */
-    if (f->last_error) {
-        ret = f->last_error;
-    }
-    g_free(f);
-    trace_qemu_file_fclose();
-    return ret;
-}
-
-static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
-{
-    /* check for adjacent buffers and coalesce them */
-    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
-        f->iov[f->iovcnt - 1].iov_len) {
-        f->iov[f->iovcnt - 1].iov_len += size;
-    } else {
-        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
-        f->iov[f->iovcnt++].iov_len = size;
-    }
-
-    if (f->iovcnt >= MAX_IOV_SIZE) {
-        qemu_fflush(f);
-    }
-}
-
-void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size)
-{
-    if (!f->ops->writev_buffer) {
-        qemu_put_buffer(f, buf, size);
-        return;
-    }
-
-    if (f->last_error) {
-        return;
-    }
-
-    f->bytes_xfer += size;
-    add_to_iovec(f, buf, size);
-}
-
-void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size)
-{
-    int l;
-
-    if (f->last_error) {
-        return;
-    }
-
-    while (size > 0) {
-        l = IO_BUF_SIZE - f->buf_index;
-        if (l > size) {
-            l = size;
-        }
-        memcpy(f->buf + f->buf_index, buf, l);
-        f->bytes_xfer += l;
-        if (f->ops->writev_buffer) {
-            add_to_iovec(f, f->buf + f->buf_index, l);
-        }
-        f->buf_index += l;
-        if (f->buf_index == IO_BUF_SIZE) {
-            qemu_fflush(f);
-        }
-        if (qemu_file_get_error(f)) {
-            break;
-        }
-        buf += l;
-        size -= l;
-    }
-}
-
-void qemu_put_byte(QEMUFile *f, int v)
-{
-    if (f->last_error) {
-        return;
-    }
-
-    f->buf[f->buf_index] = v;
-    f->bytes_xfer++;
-    if (f->ops->writev_buffer) {
-        add_to_iovec(f, f->buf + f->buf_index, 1);
-    }
-    f->buf_index++;
-    if (f->buf_index == IO_BUF_SIZE) {
-        qemu_fflush(f);
-    }
-}
-
-void qemu_file_skip(QEMUFile *f, int size)
-{
-    if (f->buf_index + size <= f->buf_size) {
-        f->buf_index += size;
-    }
-}
-
-/*
- * Read 'size' bytes from file (at 'offset') into buf without moving the
- * pointer.
- *
- * It will return size unless there was an error, in which case it will
- * return as many bytes as it managed to read (assuming blocking fds, which
- * all current QEMUFiles are).
- */
-int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset)
-{
-    int pending;
-    int index;
-
-    assert(!qemu_file_is_writable(f));
-    assert(offset < IO_BUF_SIZE);
-    assert(size <= IO_BUF_SIZE - offset);
-
-    /* The 1st byte to read from */
-    index = f->buf_index + offset;
-    /* The number of available bytes starting at index */
-    pending = f->buf_size - index;
-
-    /*
-     * qemu_fill_buffer might return just a few bytes, even when there isn't
-     * an error, so loop collecting them until we get enough.
-     */
-    while (pending < size) {
-        int received = qemu_fill_buffer(f);
-
-        if (received <= 0) {
-            break;
-        }
-
-        index = f->buf_index + offset;
-        pending = f->buf_size - index;
-    }
-
-    if (pending <= 0) {
-        return 0;
-    }
-    if (size > pending) {
-        size = pending;
-    }
-
-    memcpy(buf, f->buf + index, size);
-    return size;
-}
-
-/*
- * Read 'size' bytes of data from the file into buf.
- * 'size' can be larger than the internal buffer.
- *
- * It will return size unless there was an error, in which case it will
- * return as many bytes as it managed to read (assuming blocking fds, which
- * all current QEMUFiles are).
- */
-int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
-{
-    int pending = size;
-    int done = 0;
-
-    while (pending > 0) {
-        int res;
-
-        res = qemu_peek_buffer(f, buf, MIN(pending, IO_BUF_SIZE), 0);
-        if (res == 0) {
-            return done;
-        }
-        qemu_file_skip(f, res);
-        buf += res;
-        pending -= res;
-        done += res;
-    }
-    return done;
-}
-
-/*
- * Peeks a single byte from the buffer; this isn't guaranteed to work if
- * offset leaves a gap after the previous read/peeked data.
- */
-int qemu_peek_byte(QEMUFile *f, int offset)
-{
-    int index = f->buf_index + offset;
-
-    assert(!qemu_file_is_writable(f));
-    assert(offset < IO_BUF_SIZE);
-
-    if (index >= f->buf_size) {
-        qemu_fill_buffer(f);
-        index = f->buf_index + offset;
-        if (index >= f->buf_size) {
-            return 0;
-        }
-    }
-    return f->buf[index];
-}
-
-int qemu_get_byte(QEMUFile *f)
-{
-    int result;
-
-    result = qemu_peek_byte(f, 0);
-    qemu_file_skip(f, 1);
-    return result;
-}
-
-int64_t qemu_ftell(QEMUFile *f)
-{
-    qemu_fflush(f);
-    return f->pos;
-}
-
-int qemu_file_rate_limit(QEMUFile *f)
-{
-    if (qemu_file_get_error(f)) {
-        return 1;
-    }
-    if (f->xfer_limit > 0 && f->bytes_xfer > f->xfer_limit) {
-        return 1;
-    }
-    return 0;
-}
-
-int64_t qemu_file_get_rate_limit(QEMUFile *f)
-{
-    return f->xfer_limit;
-}
-
-void qemu_file_set_rate_limit(QEMUFile *f, int64_t limit)
-{
-    f->xfer_limit = limit;
-}
-
-void qemu_file_reset_rate_limit(QEMUFile *f)
-{
-    f->bytes_xfer = 0;
-}
-
-void qemu_put_be16(QEMUFile *f, unsigned int v)
-{
-    qemu_put_byte(f, v >> 8);
-    qemu_put_byte(f, v);
-}
-
-void qemu_put_be32(QEMUFile *f, unsigned int v)
-{
-    qemu_put_byte(f, v >> 24);
-    qemu_put_byte(f, v >> 16);
-    qemu_put_byte(f, v >> 8);
-    qemu_put_byte(f, v);
-}
-
-void qemu_put_be64(QEMUFile *f, uint64_t v)
-{
-    qemu_put_be32(f, v >> 32);
-    qemu_put_be32(f, v);
-}
-
-unsigned int qemu_get_be16(QEMUFile *f)
-{
-    unsigned int v;
-    v = qemu_get_byte(f) << 8;
-    v |= qemu_get_byte(f);
-    return v;
-}
-
-unsigned int qemu_get_be32(QEMUFile *f)
-{
-    unsigned int v;
-    v = qemu_get_byte(f) << 24;
-    v |= qemu_get_byte(f) << 16;
-    v |= qemu_get_byte(f) << 8;
-    v |= qemu_get_byte(f);
-    return v;
-}
-
-uint64_t qemu_get_be64(QEMUFile *f)
-{
-    uint64_t v;
-    v = (uint64_t)qemu_get_be32(f) << 32;
-    v |= qemu_get_be32(f);
-    return v;
-}
-
-#define QSB_CHUNK_SIZE      (1 << 10)
-#define QSB_MAX_CHUNK_SIZE  (16 * QSB_CHUNK_SIZE)
-
-/**
- * Create a QEMUSizedBuffer
- * This type of buffer uses scatter-gather lists internally and
- * can grow to any size. Any data array in the scatter-gather list
- * can hold a different number of bytes.
- *
- * @buffer: Optional buffer to copy into the QSB
- * @len: size of initial buffer; if @buffer is given, buffer must
- *       hold at least len bytes
- *
- * Returns a pointer to a QEMUSizedBuffer or NULL on allocation failure
- */
-QEMUSizedBuffer *qsb_create(const uint8_t *buffer, size_t len)
-{
-    QEMUSizedBuffer *qsb;
-    size_t alloc_len, num_chunks, i, to_copy;
-    size_t chunk_size = (len > QSB_MAX_CHUNK_SIZE)
-                        ? QSB_MAX_CHUNK_SIZE
-                        : QSB_CHUNK_SIZE;
-
-    num_chunks = DIV_ROUND_UP(len ? len : QSB_CHUNK_SIZE, chunk_size);
-    alloc_len = num_chunks * chunk_size;
-
-    qsb = g_try_new0(QEMUSizedBuffer, 1);
-    if (!qsb) {
-        return NULL;
-    }
-
-    qsb->iov = g_try_new0(struct iovec, num_chunks);
-    if (!qsb->iov) {
-        g_free(qsb);
-        return NULL;
-    }
-
-    qsb->n_iov = num_chunks;
-
-    for (i = 0; i < num_chunks; i++) {
-        qsb->iov[i].iov_base = g_try_malloc0(chunk_size);
-        if (!qsb->iov[i].iov_base) {
-            /* qsb_free is safe since g_free can cope with NULL */
-            qsb_free(qsb);
-            return NULL;
-        }
-
-        qsb->iov[i].iov_len = chunk_size;
-        if (buffer) {
-            to_copy = (len - qsb->used) > chunk_size
-                      ? chunk_size : (len - qsb->used);
-            memcpy(qsb->iov[i].iov_base, &buffer[qsb->used], to_copy);
-            qsb->used += to_copy;
-        }
-    }
-
-    qsb->size = alloc_len;
-
-    return qsb;
-}
-
-/**
- * Free the QEMUSizedBuffer
- *
- * @qsb: The QEMUSizedBuffer to free
- */
-void qsb_free(QEMUSizedBuffer *qsb)
-{
-    size_t i;
-
-    if (!qsb) {
-        return;
-    }
-
-    for (i = 0; i < qsb->n_iov; i++) {
-        g_free(qsb->iov[i].iov_base);
-    }
-    g_free(qsb->iov);
-    g_free(qsb);
-}
-
-/**
- * Get the number of used bytes in the QEMUSizedBuffer
- *
- * @qsb: A QEMUSizedBuffer
- *
- * Returns the number of bytes currently used in this buffer
- */
-size_t qsb_get_length(const QEMUSizedBuffer *qsb)
-{
-    return qsb->used;
-}
-
-/**
- * Set the length of the buffer; the primary usage of this
- * function is to truncate the number of used bytes in the buffer.
- * The size will not be extended beyond the current number of
- * allocated bytes in the QEMUSizedBuffer.
- *
- * @qsb: A QEMUSizedBuffer
- * @new_len: The new length of bytes in the buffer
- *
- * Returns the number of bytes the buffer was truncated or extended
- * to.
- */
-size_t qsb_set_length(QEMUSizedBuffer *qsb, size_t new_len)
-{
-    if (new_len <= qsb->size) {
-        qsb->used = new_len;
-    } else {
-        qsb->used = qsb->size;
-    }
-    return qsb->used;
-}
-
-/**
- * Get the iovec that holds the data for a given position @pos.
- *
- * @qsb: A QEMUSizedBuffer
- * @pos: The index of a byte in the buffer
- * @d_off: Pointer to an offset; this function stores there the position
- *         of the byte within the iovec whose index it returns
- *
- * Returns the index of the iovec that holds the byte at the given
- * index @pos in the byte stream; a negative number if the iovec
- * for the given position @pos does not exist.
- */
-static ssize_t qsb_get_iovec(const QEMUSizedBuffer *qsb,
-                             off_t pos, off_t *d_off)
-{
-    ssize_t i;
-    off_t curr = 0;
-
-    if (pos > qsb->used) {
-        return -1;
-    }
-
-    for (i = 0; i < qsb->n_iov; i++) {
-        if (curr + qsb->iov[i].iov_len > pos) {
-            *d_off = pos - curr;
-            return i;
-        }
-        curr += qsb->iov[i].iov_len;
-    }
-    return -1;
-}
-
-/*
- * Convert the QEMUSizedBuffer into a flat buffer.
- *
- * Note: If at all possible, try to avoid this function since it
- *       may unnecessarily copy memory around.
- *
- * @qsb: pointer to QEMUSizedBuffer
- * @start: offset to start at
- * @count: number of bytes to copy
- * @buffer: a pointer to a buffer to write into (at least @count bytes)
- *
- * Returns the number of bytes copied into the output buffer
- */
-ssize_t qsb_get_buffer(const QEMUSizedBuffer *qsb, off_t start,
-                       size_t count, uint8_t *buffer)
-{
-    const struct iovec *iov;
-    size_t to_copy, all_copy;
-    ssize_t index;
-    off_t s_off;
-    off_t d_off = 0;
-    char *s;
-
-    if (start > qsb->used) {
-        return 0;
-    }
-
-    all_copy = qsb->used - start;
-    if (all_copy > count) {
-        all_copy = count;
-    } else {
-        count = all_copy;
-    }
-
-    index = qsb_get_iovec(qsb, start, &s_off);
-    if (index < 0) {
-        return 0;
-    }
-
-    while (all_copy > 0) {
-        iov = &qsb->iov[index];
-
-        s = iov->iov_base;
-
-        to_copy = iov->iov_len - s_off;
-        if (to_copy > all_copy) {
-            to_copy = all_copy;
-        }
-        memcpy(&buffer[d_off], &s[s_off], to_copy);
-
-        d_off += to_copy;
-        all_copy -= to_copy;
-
-        s_off = 0;
-        index++;
-    }
-
-    return count;
-}
-
-/**
- * Grow the QEMUSizedBuffer to the given size and allocate
- * memory for it.
- *
- * @qsb: A QEMUSizedBuffer
- * @new_size: The new size of the buffer
- *
- * Return:
- *    a negative error code in case of memory allocation failure
- * or
- *    the new size of the buffer. The returned size may be greater than or
- *    equal to @new_size.
- */
-static ssize_t qsb_grow(QEMUSizedBuffer *qsb, size_t new_size)
-{
-    size_t needed_chunks, i;
-
-    if (qsb->size < new_size) {
-        struct iovec *new_iov;
-        size_t size_diff = new_size - qsb->size;
-        size_t chunk_size = (size_diff > QSB_MAX_CHUNK_SIZE)
-                             ? QSB_MAX_CHUNK_SIZE : QSB_CHUNK_SIZE;
-
-        needed_chunks = DIV_ROUND_UP(size_diff, chunk_size);
-
-        new_iov = g_try_new(struct iovec, qsb->n_iov + needed_chunks);
-        if (new_iov == NULL) {
-            return -ENOMEM;
-        }
-
-        /* Allocate new chunks as needed into new_iov */
-        for (i = qsb->n_iov; i < qsb->n_iov + needed_chunks; i++) {
-            new_iov[i].iov_base = g_try_malloc0(chunk_size);
-            new_iov[i].iov_len = chunk_size;
-            if (!new_iov[i].iov_base) {
-                size_t j;
-
-                /* Free previously allocated new chunks */
-                for (j = qsb->n_iov; j < i; j++) {
-                    g_free(new_iov[j].iov_base);
-                }
-                g_free(new_iov);
-
-                return -ENOMEM;
-            }
-        }
-
-        /*
-         * Now we can't get any allocation errors, copy over to new iov
-         * and switch.
-         */
-        for (i = 0; i < qsb->n_iov; i++) {
-            new_iov[i] = qsb->iov[i];
-        }
-
-        qsb->n_iov += needed_chunks;
-        g_free(qsb->iov);
-        qsb->iov = new_iov;
-        qsb->size += (needed_chunks * chunk_size);
-    }
-
-    return qsb->size;
-}
-
-/**
- * Write a given number of bytes into the QEMUSizedBuffer at a given
- * position. This function will automatically grow the
- * QEMUSizedBuffer.
- *
- * @qsb: A QEMUSizedBuffer
- * @source: A byte array to copy data from
- * @pos: The position within the @qsb to write data to
- * @count: The number of bytes to copy into the @qsb
- *
- * Returns @count, or a negative error code in case of memory allocation
- * failure or an invalid @pos
- */
-ssize_t qsb_write_at(QEMUSizedBuffer *qsb, const uint8_t *source,
-                     off_t pos, size_t count)
-{
-    ssize_t rc = qsb_grow(qsb, pos + count);
-    size_t to_copy;
-    size_t all_copy = count;
-    const struct iovec *iov;
-    ssize_t index;
-    char *dest;
-    off_t d_off, s_off = 0;
-
-    if (rc < 0) {
-        return rc;
-    }
-
-    if (pos + count > qsb->used) {
-        qsb->used = pos + count;
-    }
-
-    index = qsb_get_iovec(qsb, pos, &d_off);
-    if (index < 0) {
-        return -EINVAL;
-    }
-
-    while (all_copy > 0) {
-        iov = &qsb->iov[index];
-
-        dest = iov->iov_base;
-
-        to_copy = iov->iov_len - d_off;
-        if (to_copy > all_copy) {
-            to_copy = all_copy;
-        }
-
-        memcpy(&dest[d_off], &source[s_off], to_copy);
-
-        s_off += to_copy;
-        all_copy -= to_copy;
-
-        d_off = 0;
-        index++;
-    }
-
-    return count;
-}
-
-/**
- * Create a deep copy of the given QEMUSizedBuffer.
- *
- * @qsb: A QEMUSizedBuffer
- *
- * Returns a clone of @qsb or NULL on allocation failure
- */
-QEMUSizedBuffer *qsb_clone(const QEMUSizedBuffer *qsb)
-{
-    QEMUSizedBuffer *out = qsb_create(NULL, qsb_get_length(qsb));
-    size_t i;
-    ssize_t res;
-    off_t pos = 0;
-
-    if (!out) {
-        return NULL;
-    }
-
-    for (i = 0; i < qsb->n_iov; i++) {
-        res = qsb_write_at(out, qsb->iov[i].iov_base,
-                           pos, qsb->iov[i].iov_len);
-        if (res < 0) {
-            qsb_free(out);
-            return NULL;
-        }
-        pos += res;
-    }
-
-    return out;
-}
-
-typedef struct QEMUBuffer {
-    QEMUSizedBuffer *qsb;
-    QEMUFile *file;
-} QEMUBuffer;
-
-static int buf_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
-{
-    QEMUBuffer *s = opaque;
-    ssize_t len = qsb_get_length(s->qsb) - pos;
-
-    if (len <= 0) {
-        return 0;
-    }
-
-    if (len > size) {
-        len = size;
-    }
-    return qsb_get_buffer(s->qsb, pos, len, buf);
-}
-
-static int buf_put_buffer(void *opaque, const uint8_t *buf,
-                          int64_t pos, int size)
-{
-    QEMUBuffer *s = opaque;
-
-    return qsb_write_at(s->qsb, buf, pos, size);
-}
-
-static int buf_close(void *opaque)
-{
-    QEMUBuffer *s = opaque;
-
-    qsb_free(s->qsb);
-
-    g_free(s);
-
-    return 0;
-}
-
-const QEMUSizedBuffer *qemu_buf_get(QEMUFile *f)
-{
-    QEMUBuffer *p;
-
-    qemu_fflush(f);
-
-    p = f->opaque;
-
-    return p->qsb;
-}
-
-static const QEMUFileOps buf_read_ops = {
-    .get_buffer = buf_get_buffer,
-    .close =      buf_close,
-};
-
-static const QEMUFileOps buf_write_ops = {
-    .put_buffer = buf_put_buffer,
-    .close =      buf_close,
-};
-
-QEMUFile *qemu_bufopen(const char *mode, QEMUSizedBuffer *input)
-{
-    QEMUBuffer *s;
-
-    if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') ||
-        mode[1] != '\0') {
-        error_report("qemu_bufopen: Argument validity check failed");
-        return NULL;
-    }
-
-    s = g_malloc0(sizeof(QEMUBuffer));
-    if (mode[0] == 'r') {
-        s->qsb = input;
-    }
-
-    if (s->qsb == NULL) {
-        s->qsb = qsb_create(NULL, 0);
-    }
-    if (!s->qsb) {
-        g_free(s);
-        error_report("qemu_bufopen: qsb_create failed");
-        return NULL;
-    }
-
-    if (mode[0] == 'r') {
-        s->file = qemu_fopen_ops(s, &buf_read_ops);
-    } else {
-        s->file = qemu_fopen_ops(s, &buf_write_ops);
-    }
-    return s->file;
-}
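
A hedged round-trip sketch for the buffer backend above (buffer_roundtrip is illustrative; it assumes the usual qemu-common.h environment): a write-mode file allocates its own QEMUSizedBuffer, qemu_buf_get() flushes and exposes it, and a read-mode file takes ownership of a clone, which buf_close() later frees.

/* Illustrative only: write into one buffer file, read back from a clone. */
static void buffer_roundtrip(void)
{
    QEMUFile *wf = qemu_bufopen("w", NULL);     /* backend allocates a QSB */
    QEMUSizedBuffer *copy;
    QEMUFile *rf;

    qemu_put_be16(wf, 0xbeef);
    copy = qsb_clone(qemu_buf_get(wf));         /* qemu_buf_get() flushes */
    rf = qemu_bufopen("r", copy);               /* read side now owns copy */

    g_assert(qemu_get_be16(rf) == 0xbeef);

    qemu_fclose(rf);                            /* frees copy via buf_close */
    qemu_fclose(wf);                            /* frees the write-side QSB */
}
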
index 16f0e4c805c08a18db85df70f88b55b805dad326..faba9cb58de8c00dc72cf5afe758e35f49bf3aac 100644 (file)
@@ -48,8 +48,11 @@ check-unit-y += tests/test-hbitmap$(EXESUF)
 check-unit-y += tests/test-x86-cpuid$(EXESUF)
 # all code tested by test-x86-cpuid is inside topology.h
 gcov-files-test-x86-cpuid-y =
+ifeq ($(CONFIG_SOFTMMU),y)
 check-unit-y += tests/test-xbzrle$(EXESUF)
-gcov-files-test-xbzrle-y = xbzrle.c
+gcov-files-test-xbzrle-y = migration/xbzrle.c
+check-unit-$(CONFIG_POSIX) += tests/test-vmstate$(EXESUF)
+endif
 check-unit-y += tests/test-cutils$(EXESUF)
 gcov-files-test-cutils-y += util/cutils.c
 check-unit-y += tests/test-mul64$(EXESUF)
@@ -61,7 +64,6 @@ check-unit-y += tests/test-bitops$(EXESUF)
 check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
 check-unit-y += tests/check-qom-interface$(EXESUF)
 gcov-files-check-qom-interface-y = qom/object.c
-check-unit-$(CONFIG_POSIX) += tests/test-vmstate$(EXESUF)
 check-unit-y += tests/test-qemu-opts$(EXESUF)
 gcov-files-test-qemu-opts-y = qom/test-qemu-opts.c
 
@@ -247,7 +249,7 @@ tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(block-obj-y) libqemu
 tests/test-iov$(EXESUF): tests/test-iov.o libqemuutil.a
 tests/test-hbitmap$(EXESUF): tests/test-hbitmap.o libqemuutil.a libqemustub.a
 tests/test-x86-cpuid$(EXESUF): tests/test-x86-cpuid.o
-tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o xbzrle.o page_cache.o libqemuutil.a
+tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o page_cache.o libqemuutil.a
 tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o
 tests/test-int128$(EXESUF): tests/test-int128.o
 tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
@@ -258,7 +260,7 @@ tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
        $(test-qapi-obj-y) \
        libqemuutil.a libqemustub.a
 tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
-       vmstate.o qemu-file.o qemu-file-unix.o \
+       migration/vmstate.o migration/qemu-file.o migration/qemu-file-unix.o \
        libqemuutil.a libqemustub.a
 
 tests/test-qapi-types.c tests/test-qapi-types.h :\
diff --git a/vmstate.c b/vmstate.c
deleted file mode 100644 (file)
index 3dde574..0000000
--- a/vmstate.c
+++ /dev/null
@@ -1,687 +0,0 @@
-#include "qemu-common.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
-#include "migration/vmstate.h"
-#include "qemu/bitops.h"
-#include "trace.h"
-
-static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
-                                    void *opaque);
-static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
-                                   void *opaque);
-
-static int vmstate_n_elems(void *opaque, VMStateField *field)
-{
-    int n_elems = 1;
-
-    if (field->flags & VMS_ARRAY) {
-        n_elems = field->num;
-    } else if (field->flags & VMS_VARRAY_INT32) {
-        n_elems = *(int32_t *)(opaque+field->num_offset);
-    } else if (field->flags & VMS_VARRAY_UINT32) {
-        n_elems = *(uint32_t *)(opaque+field->num_offset);
-    } else if (field->flags & VMS_VARRAY_UINT16) {
-        n_elems = *(uint16_t *)(opaque+field->num_offset);
-    } else if (field->flags & VMS_VARRAY_UINT8) {
-        n_elems = *(uint8_t *)(opaque+field->num_offset);
-    }
-
-    return n_elems;
-}
-
-static int vmstate_size(void *opaque, VMStateField *field)
-{
-    int size = field->size;
-
-    if (field->flags & VMS_VBUFFER) {
-        size = *(int32_t *)(opaque+field->size_offset);
-        if (field->flags & VMS_MULTIPLY) {
-            size *= field->size;
-        }
-    }
-
-    return size;
-}
-
-static void *vmstate_base_addr(void *opaque, VMStateField *field, bool alloc)
-{
-    void *base_addr = opaque + field->offset;
-
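-    /* For VMS_POINTER fields the struct holds a pointer to the real data;
-     * VMS_ALLOC additionally allocates that data on the load path. */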
-    if (field->flags & VMS_POINTER) {
-        if (alloc && (field->flags & VMS_ALLOC)) {
-            gsize size = 0;
-            if (field->flags & VMS_VBUFFER) {
-                size = vmstate_size(opaque, field);
-            } else {
-                int n_elems = vmstate_n_elems(opaque, field);
-                if (n_elems) {
-                    size = n_elems * field->size;
-                }
-            }
-            if (size) {
-                *((void **)base_addr + field->start) = g_malloc(size);
-            }
-        }
-        base_addr = *(void **)base_addr + field->start;
-    }
-
-    return base_addr;
-}
-
-int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
-                       void *opaque, int version_id)
-{
-    VMStateField *field = vmsd->fields;
-    int ret;
-
-    if (version_id > vmsd->version_id) {
-        return -EINVAL;
-    }
-    if (version_id < vmsd->minimum_version_id) {
-        if (vmsd->load_state_old &&
-            version_id >= vmsd->minimum_version_id_old) {
-            return vmsd->load_state_old(f, opaque, version_id);
-        }
-        return -EINVAL;
-    }
-    if (vmsd->pre_load) {
-        int ret = vmsd->pre_load(opaque);
-        if (ret) {
-            return ret;
-        }
-    }
-    while (field->name) {
-        if ((field->field_exists &&
-             field->field_exists(opaque, version_id)) ||
-            (!field->field_exists &&
-             field->version_id <= version_id)) {
-            void *base_addr = vmstate_base_addr(opaque, field, true);
-            int i, n_elems = vmstate_n_elems(opaque, field);
-            int size = vmstate_size(opaque, field);
-
-            for (i = 0; i < n_elems; i++) {
-                void *addr = base_addr + size * i;
-
-                if (field->flags & VMS_ARRAY_OF_POINTER) {
-                    addr = *(void **)addr;
-                }
-                if (field->flags & VMS_STRUCT) {
-                    ret = vmstate_load_state(f, field->vmsd, addr,
-                                             field->vmsd->version_id);
-                } else {
-                    ret = field->info->get(f, addr, size);
-                }
-                if (ret >= 0) {
-                    ret = qemu_file_get_error(f);
-                }
-                if (ret < 0) {
-                    qemu_file_set_error(f, ret);
-                    trace_vmstate_load_field_error(field->name, ret);
-                    return ret;
-                }
-            }
-        } else if (field->flags & VMS_MUST_EXIST) {
-            fprintf(stderr, "Input validation failed: %s/%s\n",
-                    vmsd->name, field->name);
-            return -1;
-        }
-        field++;
-    }
-    ret = vmstate_subsection_load(f, vmsd, opaque);
-    if (ret != 0) {
-        return ret;
-    }
-    if (vmsd->post_load) {
-        return vmsd->post_load(opaque, version_id);
-    }
-    return 0;
-}
-
-void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
-                        void *opaque)
-{
-    VMStateField *field = vmsd->fields;
-
-    if (vmsd->pre_save) {
-        vmsd->pre_save(opaque);
-    }
-    while (field->name) {
-        if (!field->field_exists ||
-            field->field_exists(opaque, vmsd->version_id)) {
-            void *base_addr = vmstate_base_addr(opaque, field, false);
-            int i, n_elems = vmstate_n_elems(opaque, field);
-            int size = vmstate_size(opaque, field);
-
-            for (i = 0; i < n_elems; i++) {
-                void *addr = base_addr + size * i;
-
-                if (field->flags & VMS_ARRAY_OF_POINTER) {
-                    addr = *(void **)addr;
-                }
-                if (field->flags & VMS_STRUCT) {
-                    vmstate_save_state(f, field->vmsd, addr);
-                } else {
-                    field->info->put(f, addr, size);
-                }
-            }
-        } else {
-            if (field->flags & VMS_MUST_EXIST) {
-                fprintf(stderr, "Output state validation failed: %s/%s\n",
-                        vmsd->name, field->name);
-                assert(!(field->flags & VMS_MUST_EXIST));
-            }
-        }
-        field++;
-    }
-    vmstate_subsection_save(f, vmsd, opaque);
-}
-
-static const VMStateDescription *
-    vmstate_get_subsection(const VMStateSubsection *sub, char *idstr)
-{
-    while (sub && sub->needed) {
-        if (strcmp(idstr, sub->vmsd->name) == 0) {
-            return sub->vmsd;
-        }
-        sub++;
-    }
-    return NULL;
-}
-
-static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
-                                   void *opaque)
-{
-    while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
-        char idstr[256];
-        int ret;
-        uint8_t version_id, len, size;
-        const VMStateDescription *sub_vmsd;
-
-        len = qemu_peek_byte(f, 1);
-        if (len < strlen(vmsd->name) + 1) {
-            /* subsection name has to be "section_name/a" */
-            return 0;
-        }
-        size = qemu_peek_buffer(f, (uint8_t *)idstr, len, 2);
-        if (size != len) {
-            return 0;
-        }
-        idstr[size] = 0;
-
-        if (strncmp(vmsd->name, idstr, strlen(vmsd->name)) != 0) {
-            /* it doesn't have a valid subsection name */
-            return 0;
-        }
-        sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
-        if (sub_vmsd == NULL) {
-            return -ENOENT;
-        }
-        qemu_file_skip(f, 1); /* subsection */
-        qemu_file_skip(f, 1); /* len */
-        qemu_file_skip(f, len); /* idstr */
-        version_id = qemu_get_be32(f);
-
-        ret = vmstate_load_state(f, sub_vmsd, opaque, version_id);
-        if (ret) {
-            return ret;
-        }
-    }
-    return 0;
-}
-
-static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
-                                    void *opaque)
-{
-    const VMStateSubsection *sub = vmsd->subsections;
-
-    while (sub && sub->needed) {
-        if (sub->needed(opaque)) {
-            const VMStateDescription *vmsd = sub->vmsd;
-            uint8_t len;
-
-            qemu_put_byte(f, QEMU_VM_SUBSECTION);
-            len = strlen(vmsd->name);
-            qemu_put_byte(f, len);
-            qemu_put_buffer(f, (uint8_t *)vmsd->name, len);
-            qemu_put_be32(f, vmsd->version_id);
-            vmstate_save_state(f, vmsd, opaque);
-        }
-        sub++;
-    }
-}
-
-/* bool */
-
-static int get_bool(QEMUFile *f, void *pv, size_t size)
-{
-    bool *v = pv;
-    *v = qemu_get_byte(f);
-    return 0;
-}
-
-static void put_bool(QEMUFile *f, void *pv, size_t size)
-{
-    bool *v = pv;
-    qemu_put_byte(f, *v);
-}
-
-const VMStateInfo vmstate_info_bool = {
-    .name = "bool",
-    .get  = get_bool,
-    .put  = put_bool,
-};
-
-/* 8 bit int */
-
-static int get_int8(QEMUFile *f, void *pv, size_t size)
-{
-    int8_t *v = pv;
-    qemu_get_s8s(f, v);
-    return 0;
-}
-
-static void put_int8(QEMUFile *f, void *pv, size_t size)
-{
-    int8_t *v = pv;
-    qemu_put_s8s(f, v);
-}
-
-const VMStateInfo vmstate_info_int8 = {
-    .name = "int8",
-    .get  = get_int8,
-    .put  = put_int8,
-};
-
-/* 16 bit int */
-
-static int get_int16(QEMUFile *f, void *pv, size_t size)
-{
-    int16_t *v = pv;
-    qemu_get_sbe16s(f, v);
-    return 0;
-}
-
-static void put_int16(QEMUFile *f, void *pv, size_t size)
-{
-    int16_t *v = pv;
-    qemu_put_sbe16s(f, v);
-}
-
-const VMStateInfo vmstate_info_int16 = {
-    .name = "int16",
-    .get  = get_int16,
-    .put  = put_int16,
-};
-
-/* 32 bit int */
-
-static int get_int32(QEMUFile *f, void *pv, size_t size)
-{
-    int32_t *v = pv;
-    qemu_get_sbe32s(f, v);
-    return 0;
-}
-
-static void put_int32(QEMUFile *f, void *pv, size_t size)
-{
-    int32_t *v = pv;
-    qemu_put_sbe32s(f, v);
-}
-
-const VMStateInfo vmstate_info_int32 = {
-    .name = "int32",
-    .get  = get_int32,
-    .put  = put_int32,
-};
-
-/* 32 bit int. Check that the received value is the same as the one
-   in the field */
-
-static int get_int32_equal(QEMUFile *f, void *pv, size_t size)
-{
-    int32_t *v = pv;
-    int32_t v2;
-    qemu_get_sbe32s(f, &v2);
-
-    if (*v == v2) {
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_int32_equal = {
-    .name = "int32 equal",
-    .get  = get_int32_equal,
-    .put  = put_int32,
-};
-
-/* 32 bit int. Check that the received value is non-negative
- * and less than or equal to the one in the field.
- */
-
-static int get_int32_le(QEMUFile *f, void *pv, size_t size)
-{
-    int32_t *cur = pv;
-    int32_t loaded;
-    qemu_get_sbe32s(f, &loaded);
-
-    if (loaded >= 0 && loaded <= *cur) {
-        *cur = loaded;
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_int32_le = {
-    .name = "int32 le",
-    .get  = get_int32_le,
-    .put  = put_int32,
-};
-
-/* 64 bit int */
-
-static int get_int64(QEMUFile *f, void *pv, size_t size)
-{
-    int64_t *v = pv;
-    qemu_get_sbe64s(f, v);
-    return 0;
-}
-
-static void put_int64(QEMUFile *f, void *pv, size_t size)
-{
-    int64_t *v = pv;
-    qemu_put_sbe64s(f, v);
-}
-
-const VMStateInfo vmstate_info_int64 = {
-    .name = "int64",
-    .get  = get_int64,
-    .put  = put_int64,
-};
-
-/* 8 bit unsigned int */
-
-static int get_uint8(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t *v = pv;
-    qemu_get_8s(f, v);
-    return 0;
-}
-
-static void put_uint8(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t *v = pv;
-    qemu_put_8s(f, v);
-}
-
-const VMStateInfo vmstate_info_uint8 = {
-    .name = "uint8",
-    .get  = get_uint8,
-    .put  = put_uint8,
-};
-
-/* 16 bit unsigned int */
-
-static int get_uint16(QEMUFile *f, void *pv, size_t size)
-{
-    uint16_t *v = pv;
-    qemu_get_be16s(f, v);
-    return 0;
-}
-
-static void put_uint16(QEMUFile *f, void *pv, size_t size)
-{
-    uint16_t *v = pv;
-    qemu_put_be16s(f, v);
-}
-
-const VMStateInfo vmstate_info_uint16 = {
-    .name = "uint16",
-    .get  = get_uint16,
-    .put  = put_uint16,
-};
-
-/* 32 bit unsigned int */
-
-static int get_uint32(QEMUFile *f, void *pv, size_t size)
-{
-    uint32_t *v = pv;
-    qemu_get_be32s(f, v);
-    return 0;
-}
-
-static void put_uint32(QEMUFile *f, void *pv, size_t size)
-{
-    uint32_t *v = pv;
-    qemu_put_be32s(f, v);
-}
-
-const VMStateInfo vmstate_info_uint32 = {
-    .name = "uint32",
-    .get  = get_uint32,
-    .put  = put_uint32,
-};
-
-/* 32 bit uint. Check that the received value is the same as the one
-   in the field */
-
-static int get_uint32_equal(QEMUFile *f, void *pv, size_t size)
-{
-    uint32_t *v = pv;
-    uint32_t v2;
-    qemu_get_be32s(f, &v2);
-
-    if (*v == v2) {
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_uint32_equal = {
-    .name = "uint32 equal",
-    .get  = get_uint32_equal,
-    .put  = put_uint32,
-};
-
-/* 64 bit unsigned int */
-
-static int get_uint64(QEMUFile *f, void *pv, size_t size)
-{
-    uint64_t *v = pv;
-    qemu_get_be64s(f, v);
-    return 0;
-}
-
-static void put_uint64(QEMUFile *f, void *pv, size_t size)
-{
-    uint64_t *v = pv;
-    qemu_put_be64s(f, v);
-}
-
-const VMStateInfo vmstate_info_uint64 = {
-    .name = "uint64",
-    .get  = get_uint64,
-    .put  = put_uint64,
-};
-
-/* 64 bit unsigned int. Check that the received value is the same as the
-   one in the field */
-
-static int get_uint64_equal(QEMUFile *f, void *pv, size_t size)
-{
-    uint64_t *v = pv;
-    uint64_t v2;
-    qemu_get_be64s(f, &v2);
-
-    if (*v == v2) {
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_uint64_equal = {
-    .name = "int64 equal",
-    .get  = get_uint64_equal,
-    .put  = put_uint64,
-};
-
-/* 8 bit unsigned int. Check that the received value is the same as the
-   one in the field */
-
-static int get_uint8_equal(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t *v = pv;
-    uint8_t v2;
-    qemu_get_8s(f, &v2);
-
-    if (*v == v2) {
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_uint8_equal = {
-    .name = "uint8 equal",
-    .get  = get_uint8_equal,
-    .put  = put_uint8,
-};
-
-/* 16 bit unsigned int. Check that the received value is the same as the
-   one in the field */
-
-static int get_uint16_equal(QEMUFile *f, void *pv, size_t size)
-{
-    uint16_t *v = pv;
-    uint16_t v2;
-    qemu_get_be16s(f, &v2);
-
-    if (*v == v2) {
-        return 0;
-    }
-    return -EINVAL;
-}
-
-const VMStateInfo vmstate_info_uint16_equal = {
-    .name = "uint16 equal",
-    .get  = get_uint16_equal,
-    .put  = put_uint16,
-};
-
-/* floating point */
-
-static int get_float64(QEMUFile *f, void *pv, size_t size)
-{
-    float64 *v = pv;
-
-    *v = make_float64(qemu_get_be64(f));
-    return 0;
-}
-
-static void put_float64(QEMUFile *f, void *pv, size_t size)
-{
-    uint64_t *v = pv;
-
-    qemu_put_be64(f, float64_val(*v));
-}
-
-const VMStateInfo vmstate_info_float64 = {
-    .name = "float64",
-    .get  = get_float64,
-    .put  = put_float64,
-};
-
-/* uint8_t buffers */
-
-static int get_buffer(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t *v = pv;
-    qemu_get_buffer(f, v, size);
-    return 0;
-}
-
-static void put_buffer(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t *v = pv;
-    qemu_put_buffer(f, v, size);
-}
-
-const VMStateInfo vmstate_info_buffer = {
-    .name = "buffer",
-    .get  = get_buffer,
-    .put  = put_buffer,
-};
-
-/* unused buffers: space that was used for some fields that are
-   not useful anymore */
-
-static int get_unused_buffer(QEMUFile *f, void *pv, size_t size)
-{
-    uint8_t buf[1024];
-    int block_len;
-
-    while (size > 0) {
-        block_len = MIN(sizeof(buf), size);
-        size -= block_len;
-        qemu_get_buffer(f, buf, block_len);
-    }
-    return 0;
-}
-
-static void put_unused_buffer(QEMUFile *f, void *pv, size_t size)
-{
-    static const uint8_t buf[1024];
-    int block_len;
-
-    while (size > 0) {
-        block_len = MIN(sizeof(buf), size);
-        size -= block_len;
-        qemu_put_buffer(f, buf, block_len);
-    }
-}
-
-const VMStateInfo vmstate_info_unused_buffer = {
-    .name = "unused_buffer",
-    .get  = get_unused_buffer,
-    .put  = put_unused_buffer,
-};
-
-/* bitmaps (as defined by bitmap.h). Note that size here is the size
- * of the bitmap in bits. The on-the-wire format of a bitmap is 64
- * bit words with the bits in big endian order. The in-memory format
- * is an array of 'unsigned long', which may be either 32 or 64 bits.
- */
-/* This is the number of 64 bit words sent over the wire */
-#define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, 64)
-static int get_bitmap(QEMUFile *f, void *pv, size_t size)
-{
-    unsigned long *bmp = pv;
-    int i, idx = 0;
-    for (i = 0; i < BITS_TO_U64S(size); i++) {
-        uint64_t w = qemu_get_be64(f);
-        bmp[idx++] = w;
-        if (sizeof(unsigned long) == 4 && idx < BITS_TO_LONGS(size)) {
-            bmp[idx++] = w >> 32;
-        }
-    }
-    return 0;
-}
-
-static void put_bitmap(QEMUFile *f, void *pv, size_t size)
-{
-    unsigned long *bmp = pv;
-    int i, idx = 0;
-    for (i = 0; i < BITS_TO_U64S(size); i++) {
-        uint64_t w = bmp[idx++];
-        if (sizeof(unsigned long) == 4 && idx < BITS_TO_LONGS(size)) {
-            w |= ((uint64_t)bmp[idx++]) << 32;
-        }
-        qemu_put_be64(f, w);
-    }
-}
-
-const VMStateInfo vmstate_info_bitmap = {
-    .name = "bitmap",
-    .get = get_bitmap,
-    .put = put_bitmap,
-};
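
The two walkers above (vmstate_save_state()/vmstate_load_state()) are driven entirely by a VMStateField table, so a device describes its state once and gets both save and load for free. A hedged sketch using the stock VMSTATE_* macros from migration/vmstate.h (ToyState and toy_vmsd are illustrative, not part of the tree):

/* Illustrative only: a toy state description for the walkers above. */
typedef struct ToyState {
    uint32_t level;
    uint8_t  regs[16];
} ToyState;

static const VMStateDescription toy_vmsd = {
    .name = "toy",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, ToyState),   /* backed by vmstate_info_uint32 */
        VMSTATE_BUFFER(regs, ToyState),    /* backed by vmstate_info_buffer */
        VMSTATE_END_OF_LIST()
    }
};

/* Saving:  vmstate_save_state(f, &toy_vmsd, &s);
 * Loading: vmstate_load_state(f, &toy_vmsd, &s, 1);  (version_id 1) */
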
diff --git a/xbzrle.c b/xbzrle.c
deleted file mode 100644 (file)
index 8e220bf..0000000
--- a/xbzrle.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Xor Based Zero Run Length Encoding
- *
- * Copyright 2013 Red Hat, Inc. and/or its affiliates
- *
- * Authors:
- *  Orit Wasserman  <owasserm@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-#include "qemu-common.h"
-#include "include/migration/migration.h"
-
-/*
-  page = zrun nzrun
-       | zrun nzrun page
-
-  zrun = length
-
-  nzrun = length byte...
-
-  length = uleb128 encoded integer
- */
-int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
-                         uint8_t *dst, int dlen)
-{
-    uint32_t zrun_len = 0, nzrun_len = 0;
-    int d = 0, i = 0;
-    long res;
-    uint8_t *nzrun_start = NULL;
-
-    g_assert(!(((uintptr_t)old_buf | (uintptr_t)new_buf | slen) %
-               sizeof(long)));
-
-    while (i < slen) {
-        /* overflow */
-        if (d + 2 > dlen) {
-            return -1;
-        }
-
-        /* not aligned to sizeof(long) */
-        res = (slen - i) % sizeof(long);
-        while (res && old_buf[i] == new_buf[i]) {
-            zrun_len++;
-            i++;
-            res--;
-        }
-
-        /* word at a time for speed */
-        if (!res) {
-            while (i < slen &&
-                   (*(long *)(old_buf + i)) == (*(long *)(new_buf + i))) {
-                i += sizeof(long);
-                zrun_len += sizeof(long);
-            }
-
-            /* go over the rest */
-            while (i < slen && old_buf[i] == new_buf[i]) {
-                zrun_len++;
-                i++;
-            }
-        }
-
-        /* buffer unchanged */
-        if (zrun_len == slen) {
-            return 0;
-        }
-
-        /* skip last zero run */
-        if (i == slen) {
-            return d;
-        }
-
-        d += uleb128_encode_small(dst + d, zrun_len);
-
-        zrun_len = 0;
-        nzrun_start = new_buf + i;
-
-        /* overflow */
-        if (d + 2 > dlen) {
-            return -1;
-        }
-        /* not aligned to sizeof(long) */
-        res = (slen - i) % sizeof(long);
-        while (res && old_buf[i] != new_buf[i]) {
-            i++;
-            nzrun_len++;
-            res--;
-        }
-
-        /* word at a time for speed, use of 32-bit long okay */
-        if (!res) {
-            /* truncation to 32-bit long okay */
-            unsigned long mask = (unsigned long)0x0101010101010101ULL;
-            while (i < slen) {
-                unsigned long xor;
-                xor = *(unsigned long *)(old_buf + i)
-                    ^ *(unsigned long *)(new_buf + i);
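-                /* classic zero-byte test: nonzero iff some byte of xor is
-                 * 0x00, i.e. old and new agree somewhere inside this word */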
-                if ((xor - mask) & ~xor & (mask << 7)) {
-                    /* found the end of an nzrun within the current long */
-                    while (old_buf[i] != new_buf[i]) {
-                        nzrun_len++;
-                        i++;
-                    }
-                    break;
-                } else {
-                    i += sizeof(long);
-                    nzrun_len += sizeof(long);
-                }
-            }
-        }
-
-        d += uleb128_encode_small(dst + d, nzrun_len);
-        /* overflow */
-        if (d + nzrun_len > dlen) {
-            return -1;
-        }
-        memcpy(dst + d, nzrun_start, nzrun_len);
-        d += nzrun_len;
-        nzrun_len = 0;
-    }
-
-    return d;
-}
-
-int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen)
-{
-    int i = 0, d = 0;
-    int ret;
-    uint32_t count = 0;
-
-    while (i < slen) {
-
-        /* zrun */
-        if ((slen - i) < 2) {
-            return -1;
-        }
-
-        ret = uleb128_decode_small(src + i, &count);
-        if (ret < 0 || (i && !count)) {
-            return -1;
-        }
-        i += ret;
-        d += count;
-
-        /* overflow */
-        if (d > dlen) {
-            return -1;
-        }
-
-        /* nzrun */
-        if ((slen - i) < 2) {
-            return -1;
-        }
-
-        ret = uleb128_decode_small(src + i, &count);
-        if (ret < 0 || !count) {
-            return -1;
-        }
-        i += ret;
-
-        /* overflow */
-        if (d + count > dlen || i + count > slen) {
-            return -1;
-        }
-
-        memcpy(dst + d, src + i, count);
-        d += count;
-        i += count;
-    }
-
-    return d;
-}
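
A hedged round-trip sketch for the coder above (xbzrle_roundtrip and PAGE are illustrative; it assumes the usual qemu-common.h environment). The g_assert() at the top of the encoder requires old_buf, new_buf and slen to be sizeof(long) aligned, and because the encoder skips the final zero run, the decoder is handed a copy of the old page to patch in place:

/* Illustrative only: encode one changed byte, decode, compare. */
#define PAGE 4096

static void xbzrle_roundtrip(void)
{
    uint8_t *old_buf = g_malloc0(PAGE);
    uint8_t *new_buf = g_malloc0(PAGE);
    uint8_t *enc     = g_malloc0(PAGE);
    uint8_t *out     = g_malloc0(PAGE);
    int elen;

    memcpy(out, old_buf, PAGE);       /* decoder patches a copy of "old" */
    new_buf[42] = 0xff;               /* a single changed byte */

    elen = xbzrle_encode_buffer(old_buf, new_buf, PAGE, enc, PAGE);
    g_assert(elen > 0);               /* -1 would mean the output overflowed */
    g_assert(xbzrle_decode_buffer(enc, elen, out, PAGE) >= 0);
    g_assert(memcmp(out, new_buf, PAGE) == 0);

    g_free(old_buf);
    g_free(new_buf);
    g_free(enc);
    g_free(out);
}
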