#include "migration/vmstate.h"
#include "sysemu/block-backend.h"
-#define BLOCK_SIZE (1 << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLOCK_SIZE >> BDRV_SECTOR_BITS)
+#define BLK_MIG_BLOCK_SIZE (1 << 20)
+#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)
#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS 0x02
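Note on the rename above: the value stays at 1 MiB; the new name presumably avoids colliding with the generic identifier BLOCK_SIZE, which some system headers also define. A minimal sketch of the resulting geometry, assuming QEMU's 512-byte sectors (BDRV_SECTOR_BITS == 9):

#include <assert.h>

#define BLK_MIG_BLOCK_SIZE (1 << 20)
#define BDRV_SECTOR_BITS 9  /* 512-byte sectors, as in QEMU's block layer */
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

static_assert(BDRV_SECTORS_PER_DIRTY_CHUNK == 2048,
              "one 1 MiB migration chunk covers 2048 sectors");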
uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
if (block_mig_state.zero_blocks &&
- buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+ buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
flags |= BLK_MIG_FLAG_ZERO_BLOCK;
}
return;
}
- qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
+ qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}
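Note: with the zero-blocks capability enabled, an all-zero chunk goes on the wire as a flagged header only and the early return above skips the payload; otherwise the full 1 MiB buffer follows the header. buffer_is_zero() is QEMU's optimized helper from util/bufferiszero.c; a plain-C reference with the same contract, for illustration only:

#include <stdbool.h>
#include <stddef.h>

/* Returns true iff all len bytes at buf are zero. The real helper is
 * vectorized; this sketch only mirrors its contract. */
static bool buffer_is_zero_ref(const void *buf, size_t len)
{
    const unsigned char *p = buf;
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            return false;
        }
    }
    return true;
}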
int blk_mig_active(void)
}
blk = g_new(BlkMigBlock, 1);
- blk->buf = g_malloc(BLOCK_SIZE);
+ blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
blk->bmds = bmds;
blk->sector = cur_sector;
blk->nr_sectors = nr_sectors;
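Note: each in-flight read owns a full 1 MiB staging buffer. A struct sketch inferred from the assignments above; the real BlkMigBlock in migration/block.c carries further members (I/O vector, AIO callback, queue linkage) that this excerpt does not show:

#include <stdint.h>

typedef struct BlkMigDevState BlkMigDevState;  /* per-device migration state, opaque here */

typedef struct BlkMigBlock {
    uint8_t *buf;          /* 1 MiB staging buffer for one chunk */
    BlkMigDevState *bmds;  /* owning device's migration state */
    int64_t sector;        /* first sector covered by this chunk */
    int nr_sectors;        /* chunk length in sectors; may be short at end of device */
} BlkMigBlock;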
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
- BLOCK_SIZE, NULL, NULL);
+ BLK_MIG_BLOCK_SIZE,
+ NULL, NULL);
if (!bmds->dirty_bitmap) {
ret = -errno;
goto fail;
fail:
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
if (bmds->dirty_bitmap) {
- bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
+ bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
}
}
return ret;
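Note: passing BLK_MIG_BLOCK_SIZE as the bitmap granularity gives one dirty bit per 1 MiB chunk (2048 sectors), matching the unit in which chunks are re-read and re-sent during the dirty phase. One caveat worth flagging in review: bdrv_create_dirty_bitmap() reports failure through its Error ** parameter, which is passed as NULL here, so ret = -errno depends on errno happening to be set as a side effect.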
BlkMigDevState *bmds;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
- bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
+ bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
}
}
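Note: dropping the BlockDriverState parameter from bdrv_release_dirty_bitmap() looks consistent with the bitmap tracking its owning BDS internally, which makes the extra argument redundant; both the failure path above and this teardown path move to the one-argument form.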
bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
blk = g_new(BlkMigBlock, 1);
- blk->buf = g_malloc(BLOCK_SIZE);
+ blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
blk->bmds = bmds;
blk->sector = sector;
blk->nr_sectors = nr_sectors;
/* control the rate of transfer */
blk_mig_lock();
- while (block_mig_state.read_done * BLOCK_SIZE <
+ while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
qemu_file_get_rate_limit(f) &&
block_mig_state.submitted < MAX_PARALLEL_IO &&
(block_mig_state.submitted + block_mig_state.read_done) <
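Note: the loop condition is cut off in this excerpt. The visible parts throttle the bulk phase on three axes: bytes read but not yet put on the wire must stay under the QEMUFile rate limit, the number of in-flight AIO reads must stay under MAX_PARALLEL_IO, and the truncated final comparison bounds the total of submitted plus buffered chunks.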
qemu_mutex_unlock_iothread();
blk_mig_lock();
- pending += block_mig_state.submitted * BLOCK_SIZE +
- block_mig_state.read_done * BLOCK_SIZE;
+ pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
+ block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
blk_mig_unlock();
/* Report at least one block pending during bulk phase */
if (pending <= max_size && !block_mig_state.bulk_completed) {
- pending = max_size + BLOCK_SIZE;
+ pending = max_size + BLK_MIG_BLOCK_SIZE;
}
DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
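Note: pending charges a full 1 MiB for every submitted and every read-but-unsent chunk. The clamp above keeps pending strictly above max_size until the bulk phase has walked every device once, so the migration core cannot declare convergence and enter the final stop-and-copy phase early.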
int nr_sectors;
int ret;
BlockDriverInfo bdi;
- int cluster_size = BLOCK_SIZE;
+ int cluster_size = BLK_MIG_BLOCK_SIZE;
do {
addr = qemu_get_be64(f);
ret = bdrv_get_info(blk_bs(blk), &bdi);
if (ret == 0 && bdi.cluster_size > 0 &&
- bdi.cluster_size <= BLOCK_SIZE &&
- BLOCK_SIZE % bdi.cluster_size == 0) {
+ bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
+ BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
cluster_size = bdi.cluster_size;
} else {
- cluster_size = BLOCK_SIZE;
+ cluster_size = BLK_MIG_BLOCK_SIZE;
}
}
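Note: the destination picks its write granularity here; if bdrv_get_info() reports a cluster size that fits, the received 1 MiB block is processed cluster by cluster so zero clusters can be punched sparsely. A sketch of the rule the condition encodes, assuming the same macro:

#include <stdbool.h>

#define BLK_MIG_BLOCK_SIZE (1 << 20)

/* A reported cluster size is usable iff it is positive, no larger than
 * one migration block, and divides the block evenly. */
static bool usable_cluster_size(int cs)
{
    return cs > 0 && cs <= BLK_MIG_BLOCK_SIZE && BLK_MIG_BLOCK_SIZE % cs == 0;
}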
int64_t cur_addr;
uint8_t *cur_buf;
- buf = g_malloc(BLOCK_SIZE);
- qemu_get_buffer(f, buf, BLOCK_SIZE);
- for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
+ buf = g_malloc(BLK_MIG_BLOCK_SIZE);
+ qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
+ for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
cur_buf = buf + i * cluster_size;
if ((!block_mig_state.zero_blocks ||
- cluster_size < BLOCK_SIZE) &&
+ cluster_size < BLK_MIG_BLOCK_SIZE) &&
buffer_is_zero(cur_buf, cluster_size)) {
ret = blk_pwrite_zeroes(blk, cur_addr,
cluster_size,
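Note: the blk_pwrite_zeroes() call is truncated in this excerpt. The intent of the guard above appears to be: when the zero-blocks capability is active, a fully zero 1 MiB block never arrives as data in the first place, so re-scanning for zeroes only pays off when cluster_size is smaller than the block. Zero clusters are then punched with blk_pwrite_zeroes() rather than written out as literal zero bytes, keeping the destination image sparse.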