+static unsigned long *migration_bitmap;
+static uint64_t migration_dirty_pages;
+static uint32_t last_version;
+static bool ram_bulk_stage;
+
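+/*
+ * Scan the migration bitmap for the first dirty page in 'mr' at or
+ * after 'start', clear its bit, and return its offset relative to the
+ * start of the region. A return value >= the region size means no
+ * dirty page was found. During the bulk stage every page is dirty by
+ * definition, so for any start beyond page 0 the scan is skipped and
+ * the page right after 'start' is taken.
+ */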
+static inline
+ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
+ ram_addr_t start)
+{
+ unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
+ unsigned long nr = base + (start >> TARGET_PAGE_BITS);
+ uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+ unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+
+ unsigned long next;
+
+ if (ram_bulk_stage && nr > base) {
+ next = nr + 1;
+ } else {
+ next = find_next_bit(migration_bitmap, size, nr);
+ }
+
+ if (next < size) {
+ clear_bit(next, migration_bitmap);
+ migration_dirty_pages--;
+ }
+ return (next - base) << TARGET_PAGE_BITS;
+}
+
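+/*
+ * Set the bit for the page at 'offset' within 'mr' in the migration
+ * bitmap. Returns the previous state of the bit; the dirty-page count
+ * is only bumped on a 0 -> 1 transition, which keeps
+ * migration_dirty_pages an exact count of the bits set in the bitmap.
+ */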
+static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
+ ram_addr_t offset)
+{
+ bool ret;
+ unsigned long nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+
+ ret = test_and_set_bit(nr, migration_bitmap);
+
+ if (!ret) {
+ migration_dirty_pages++;
+ }
+ return ret;
+}
+
+/* Needs iothread lock! */
+static void migration_bitmap_sync(void)
+{
+ RAMBlock *block;
+ ram_addr_t addr;
+ uint64_t num_dirty_pages_init = migration_dirty_pages;
+ MigrationState *s = migrate_get_current();
+ static int64_t start_time;
+ static int64_t bytes_xfer_prev;
+ static int64_t num_dirty_pages_period;
+ int64_t end_time;
+ int64_t bytes_xfer_now;
+
+ if (!bytes_xfer_prev) {
+ bytes_xfer_prev = ram_bytes_transferred();
+ }
+
+ if (!start_time) {
+ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+ }
+
+ trace_migration_bitmap_sync_start();
+ address_space_sync_dirty_bitmap(&address_space_memory);
+
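+ /* Transfer per-page dirty state from the memory API's
+ DIRTY_MEMORY_MIGRATION bitmap into our own migration bitmap,
+ clearing it there so the next sync starts from a clean slate. */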
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
+ if (memory_region_test_and_clear_dirty(block->mr,
+ addr, TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ migration_bitmap_set_dirty(block->mr, addr);
+ }
+ }
+ }
+ trace_migration_bitmap_sync_end(migration_dirty_pages
+ - num_dirty_pages_init);
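+ /* Accumulate pages dirtied in this round for the rate calculation. */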
+ num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
+ end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
+ /* more than 1 second = 1000 milliseconds */
+ if (end_time > start_time + 1000) {
+ if (migrate_auto_converge()) {
+ /* The following detection logic can be refined later. For now:
+ check whether the bytes dirtied in this period exceed half of the
+ approx. number of bytes that were transferred since the last time
+ we were in this routine. If that happens more than N times (for
+ now N == 4) we turn on the throttle-down logic. */
+ bytes_xfer_now = ram_bytes_transferred();
+ if (s->dirty_pages_rate &&
+ (num_dirty_pages_period * TARGET_PAGE_SIZE >
+ (bytes_xfer_now - bytes_xfer_prev) / 2) &&
+ (dirty_rate_high_cnt++ > 4)) {
+ trace_migration_throttle();
+ mig_throttle_on = true;
+ dirty_rate_high_cnt = 0;
+ }
+ bytes_xfer_prev = bytes_xfer_now;
+ } else {
+ mig_throttle_on = false;
+ }
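+ /* Dirty-page rate in pages per second over the elapsed period. */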
+ s->dirty_pages_rate = num_dirty_pages_period * 1000
+ / (end_time - start_time);
+ s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
+ start_time = end_time;
+ num_dirty_pages_period = 0;
+ }
+}
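+
+/*
+ * A minimal sketch (not part of this patch) of how a caller such as
+ * ram_save_block is expected to drain the bitmap, assuming 'block',
+ * 'mr' and 'offset' track the current position in the RAM list:
+ *
+ *     offset = migration_bitmap_find_and_reset_dirty(mr, offset);
+ *     if (offset >= block->length) {
+ *         offset = 0;                      // wrap to the next RAMBlock
+ *         block = QTAILQ_NEXT(block, next);
+ *     } else {
+ *         // send the page at block->offset + offset
+ *     }
+ */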