return pages;
}
-static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
- ram_addr_t offset)
+static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
{
- if (multifd_queue_page(file, block, offset) < 0) {
+ if (multifd_queue_page(block, offset) < 0) {
return -1;
}
stat64_add(&mig_stats.normal_pages, 1);
if (migrate_multifd() &&
!migrate_multifd_flush_after_each_section()) {
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
- int ret = multifd_send_sync_main(f);
+ int ret = multifd_send_sync_main();
if (ret < 0) {
return ret;
}
* still see partially copied pages which is data corruption.
*/
if (migrate_multifd() && !migration_in_postcopy()) {
- return ram_save_multifd_page(pss->pss_channel, block, offset);
+ return ram_save_multifd_page(block, offset);
}
return ram_save_page(rs, pss);
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
- /* caller have hold iothread lock or is in a bh, so there is
+ * caller must hold the BQL or be in a BH, so there is
* no writing race against the migration bitmap
*/
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
migration_ops = g_malloc0(sizeof(MigrationOps));
migration_ops->ram_save_target_page = ram_save_target_page_legacy;
- qemu_mutex_unlock_iothread();
- ret = multifd_send_sync_main(f);
- qemu_mutex_lock_iothread();
+ bql_unlock();
+ ret = multifd_send_sync_main();
+ bql_lock();
if (ret < 0) {
return ret;
}
if (ret >= 0
&& migration_is_setup_or_active(migrate_get_current()->state)) {
if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
- ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
+ ret = multifd_send_sync_main();
if (ret < 0) {
return ret;
}
*
* Returns zero to indicate success or negative on error
*
- * Called with iothread lock
+ * Called with the BQL held
*
* @f: QEMUFile where to send the data
* @opaque: RAMState pointer
}
}
- ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
+ ret = multifd_send_sync_main();
if (ret < 0) {
return ret;
}
uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
- qemu_mutex_lock_iothread();
+ bql_lock();
WITH_RCU_READ_LOCK_GUARD() {
migration_bitmap_sync_precopy(rs, false);
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
}
{
RAMBlock *block = NULL;
/* For memory_global_dirty_log_start below. */
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_mutex_lock_ramlist();
memory_global_dirty_log_sync(false);
}
ram_state->migration_dirty_pages = 0;
qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
/* It is need to hold the global lock to call this helper */