#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
+#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
+#include "hw/boards.h"
/* Arbitrary limit on size of each discard command,
* keeps them around ~200 bytes
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
return ctx;
}
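+/*
+ * Build a uint32List of per-vCPU blocktime values.  Entries are prepended
+ * while walking the vCPU indices in reverse, so the resulting list is
+ * ordered by vCPU index.
+ */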
+static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ uint32List *list = NULL, *entry = NULL;
+ int i;
+
+ for (i = ms->smp.cpus - 1; i >= 0; i--) {
+ entry = g_new0(uint32List, 1);
+ entry->value = ctx->vcpu_blocktime[i];
+ entry->next = list;
+ list = entry;
+ }
+
+ return list;
+}
+
+/*
+ * This function just populates MigrationInfo from postcopy's
+ * blocktime context. It does nothing unless the postcopy-blocktime
+ * capability was set.
+ *
+ * @info: pointer to MigrationInfo to populate
+ */
+void fill_destination_postcopy_migration_info(MigrationInfo *info)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
+
+ if (!bc) {
+ return;
+ }
+
+ info->has_postcopy_blocktime = true;
+ info->postcopy_blocktime = bc->total_blocktime;
+ info->has_postcopy_vcpu_blocktime = true;
+ info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
+}
+
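+/* Total blocktime accumulated so far, or 0 if blocktime tracking is disabled */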
+static uint32_t get_postcopy_total_blocktime(void)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
+
+ if (!bc) {
+ return 0;
+ }
+
+ return bc->total_blocktime;
+}
+
/**
* receive_ufd_features: check userfault fd features, to request only supported
* features in the future.
return false;
}
- if (getpagesize() != ram_pagesize_summary()) {
+ if (qemu_real_host_page_size != ram_pagesize_summary()) {
bool have_hp = false;
/* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
/* Callback from postcopy_ram_supported_by_host block iterator.
*/
-static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length, void *opaque)
+static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
- RAMBlock *rb = qemu_ram_block_by_name(block_name);
+ const char *block_name = qemu_ram_get_idstr(rb);
+ ram_addr_t length = qemu_ram_get_used_length(rb);
size_t pagesize = qemu_ram_pagesize(rb);
if (length % pagesize) {
*/
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
- long pagesize = getpagesize();
+ long pagesize = qemu_real_host_page_size;
int ufd = -1;
bool ret = false; /* Error unless we change it */
void *testarea = NULL;
}
/* We don't support postcopy with shared RAM yet */
- if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+ if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
goto out;
}
* must be done right at the start prior to pre-copy.
* opaque should be the MIS.
*/
-static int init_range(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length, void *opaque)
+static int init_range(RAMBlock *rb, void *opaque)
{
+ const char *block_name = qemu_ram_get_idstr(rb);
+ void *host_addr = qemu_ram_get_host_addr(rb);
+ ram_addr_t offset = qemu_ram_get_offset(rb);
+ ram_addr_t length = qemu_ram_get_used_length(rb);
trace_postcopy_init_range(block_name, host_addr, offset, length);
/*
* At the end of migration, undo the effects of init_range
* opaque should be the MIS.
*/
-static int cleanup_range(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length, void *opaque)
+static int cleanup_range(RAMBlock *rb, void *opaque)
{
+ const char *block_name = qemu_ram_get_idstr(rb);
+ void *host_addr = qemu_ram_get_host_addr(rb);
+ ram_addr_t offset = qemu_ram_get_offset(rb);
+ ram_addr_t length = qemu_ram_get_used_length(rb);
MigrationIncomingState *mis = opaque;
struct uffdio_range range_struct;
trace_postcopy_cleanup_range(block_name, host_addr, offset, length);
* postcopy later; must be called prior to any precopy.
* called from arch_init's similarly named ram_postcopy_incoming_init
*/
-int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
+int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(init_range, NULL)) {
+ if (foreach_not_ignored_block(init_range, NULL)) {
return -1;
}
return 0;
}
+/*
+ * Manage a single vote to the QEMU balloon inhibitor for all postcopy
+ * usage; the last caller wins.
+ */
+static void postcopy_balloon_inhibit(bool state)
+{
+ static bool cur_state = false;
+
+ if (state != cur_state) {
+ qemu_balloon_inhibit(state);
+ cur_state = state;
+ }
+}
+
/*
* At the end of a migration where postcopy_ram_incoming_init was called.
*/
if (mis->have_fault_thread) {
Error *local_err = NULL;
+ /* Let the fault thread quit */
+ atomic_set(&mis->fault_thread_quit, 1);
+ postcopy_fault_thread_notify(mis);
+ trace_postcopy_ram_incoming_cleanup_join();
+ qemu_thread_join(&mis->fault_thread);
+
if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
error_report_err(local_err);
return -1;
}
- if (qemu_ram_foreach_block(cleanup_range, mis)) {
+ if (foreach_not_ignored_block(cleanup_range, mis)) {
return -1;
}
- /* Let the fault thread quit */
- atomic_set(&mis->fault_thread_quit, 1);
- postcopy_fault_thread_notify(mis);
- trace_postcopy_ram_incoming_cleanup_join();
- qemu_thread_join(&mis->fault_thread);
trace_postcopy_ram_incoming_cleanup_closeuf();
close(mis->userfault_fd);
mis->have_fault_thread = false;
}
- qemu_balloon_inhibit(false);
+ postcopy_balloon_inhibit(false);
if (enable_mlock) {
if (os_mlock() < 0) {
}
}
- postcopy_state_set(POSTCOPY_INCOMING_END);
-
if (mis->postcopy_tmp_page) {
munmap(mis->postcopy_tmp_page, mis->largest_page_size);
mis->postcopy_tmp_page = NULL;
munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
mis->postcopy_tmp_zero_page = NULL;
}
+ trace_postcopy_ram_incoming_cleanup_blocktime(
+ get_postcopy_total_blocktime());
+
trace_postcopy_ram_incoming_cleanup_exit();
return 0;
}
/*
* Disable huge pages on an area
*/
-static int nhp_range(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length, void *opaque)
+static int nhp_range(RAMBlock *rb, void *opaque)
{
+ const char *block_name = qemu_ram_get_idstr(rb);
+ void *host_addr = qemu_ram_get_host_addr(rb);
+ ram_addr_t offset = qemu_ram_get_offset(rb);
+ ram_addr_t length = qemu_ram_get_used_length(rb);
trace_postcopy_nhp_range(block_name, host_addr, offset, length);
/*
*/
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(nhp_range, mis)) {
+ if (foreach_not_ignored_block(nhp_range, mis)) {
return -1;
}
/*
* Mark the given area of RAM as requiring notification to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on foreach_not_ignored_block.
- * host_addr: Base of area to mark
- * offset: Offset in the whole ram arena
- * length: Length of the section
+ * rb: RAMBlock to operate on
* opaque: MigrationIncomingState pointer
* Returns 0 on success
*/
-static int ram_block_enable_notify(const char *block_name, void *host_addr,
- ram_addr_t offset, ram_addr_t length,
- void *opaque)
+static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
MigrationIncomingState *mis = opaque;
struct uffdio_register reg_struct;
- reg_struct.range.start = (uintptr_t)host_addr;
- reg_struct.range.len = length;
+ reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
+ reg_struct.range.len = qemu_ram_get_used_length(rb);
reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
/* Now tell our userfault_fd that it's responsible for this area */
return -1;
}
if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
- RAMBlock *rb = qemu_ram_block_by_name(block_name);
qemu_ram_set_uf_zeroable(rb);
}
return 0;
}
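+/*
+ * Map a faulting thread id reported by userfaultfd to a vCPU index.
+ * Returns -1 if the thread id does not belong to any vCPU.
+ */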
+static int get_mem_fault_cpu_index(uint32_t pid)
+{
+ CPUState *cpu_iter;
+
+ CPU_FOREACH(cpu_iter) {
+ if (cpu_iter->thread_id == pid) {
+ trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
+ return cpu_iter->cpu_index;
+ }
+ }
+ trace_get_mem_fault_cpu_index(-1, pid);
+ return -1;
+}
+
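+/*
+ * Milliseconds elapsed since blocktime tracking started, clamped to a
+ * minimum of 1 and truncated to 32 bits.
+ */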
+static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
+{
+ int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
+ dc->start_time;
+ return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
+}
+
+/*
+ * Called when a page fault occurs: record the moment the faulting vCPU
+ * starts blocking so its blocktime can be calculated later.
+ *
+ * @addr: faulted host virtual address
+ * @ptid: faulted process thread id
+ * @rb: ramblock appropriate to addr
+ */
+static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
+ RAMBlock *rb)
+{
+ int cpu, already_received;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
+ uint32_t low_time_offset;
+
+ if (!dc || ptid == 0) {
+ return;
+ }
+ cpu = get_mem_fault_cpu_index(ptid);
+ if (cpu < 0) {
+ return;
+ }
+
+ low_time_offset = get_low_time_offset(dc);
+ if (dc->vcpu_addr[cpu] == 0) {
+ atomic_inc(&dc->smp_cpus_down);
+ }
+
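+    /* Record the fault start time and the faulting address for this vCPU */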
+ atomic_xchg(&dc->last_begin, low_time_offset);
+ atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
+ atomic_xchg(&dc->vcpu_addr[cpu], addr);
+
+    /*
+     * Check this here, not at the beginning of the function, because an
+     * earlier check could run before bitmap_set in qemu_ufd_copy_ioctl
+     * has marked the page as received.
+     */
+ already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
+ if (already_received) {
+ atomic_xchg(&dc->vcpu_addr[cpu], 0);
+ atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
+ atomic_dec(&dc->smp_cpus_down);
+ }
+ trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
+ cpu, already_received);
+}
+
+/*
+ * This function just provides the calculated blocktime per vCPU and
+ * traces it.
+ * Total blocktime is calculated in mark_postcopy_blocktime_end.
+ *
+ * Assume we have 3 CPUs:
+ *
+ * S1 E1 S1 E1
+ * -----***********------------xxx***************------------------------> CPU1
+ *
+ * S2 E2
+ * ------------****************xxx---------------------------------------> CPU2
+ *
+ * S3 E3
+ * ------------------------****xxx********-------------------------------> CPU3
+ *
+ * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
+ * S2,E1 - doesn't satisfy the condition, because the sequence S1,S2,E1
+ *         doesn't include CPU3
+ * S3,S1,E2 - the sequence includes all CPUs, so the overlap S1,E2 is
+ *         part of the total blocktime
+ * S1 - here is last_begin
+ * Legend for the picture:
+ * * - means blocktime per vCPU
+ * x - means overlapped blocktime (total blocktime)
+ *
+ * @addr: host virtual address
+ */
+static void mark_postcopy_blocktime_end(uintptr_t addr)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
+ int i, affected_cpu = 0;
+ bool vcpu_total_blocktime = false;
+ uint32_t read_vcpu_time, low_time_offset;
+
+ if (!dc) {
+ return;
+ }
+
+ low_time_offset = get_low_time_offset(dc);
+    /*
+     * Look up the vCPU(s) blocked on this address, to clear them.  This
+     * linear scan is straightforward but not optimal; a better algorithm
+     * would keep a tree or hash where the key is the address and the
+     * value is a list of vCPUs.
+     */
+ for (i = 0; i < smp_cpus; i++) {
+ uint32_t vcpu_blocktime = 0;
+
+ read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
+ if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
+ read_vcpu_time == 0) {
+ continue;
+ }
+ atomic_xchg(&dc->vcpu_addr[i], 0);
+ vcpu_blocktime = low_time_offset - read_vcpu_time;
+ affected_cpu += 1;
+        /*
+         * We need to know whether this mark_postcopy_blocktime_end call
+         * was due to a faulted page; the other possibility is a
+         * prefetched page, in which case we shouldn't be here.
+         */
+ if (!vcpu_total_blocktime &&
+ atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+ vcpu_total_blocktime = true;
+ }
+        /* keep scanning: one page could have blocked several vCPUs */
+ dc->vcpu_blocktime[i] += vcpu_blocktime;
+ }
+
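+    /* The vCPUs matched above are no longer counted as blocked */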
+ atomic_sub(&dc->smp_cpus_down, affected_cpu);
+ if (vcpu_total_blocktime) {
+ dc->total_blocktime += low_time_offset - atomic_fetch_add(
+ &dc->last_begin, 0);
+ }
+ trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
+ affected_cpu);
+}
+
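+/*
+ * Park the fault thread on postcopy_pause_sem_fault until the postcopy
+ * channel has been re-established; always returns true so the caller
+ * retries.
+ */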
+static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
+{
+ trace_postcopy_pause_fault_thread();
+
+ qemu_sem_wait(&mis->postcopy_pause_sem_fault);
+
+ trace_postcopy_pause_fault_thread_continued();
+
+ return true;
+}
+
/*
* Handle faults detected by the USERFAULT markings
*/
RAMBlock *rb = NULL;
trace_postcopy_ram_fault_thread_entry();
+ rcu_register_thread();
mis->last_rb = NULL; /* last RAMBlock we sent part of */
qemu_sem_post(&mis->fault_thread_sem);
break;
}
+ if (!mis->to_src_file) {
+ /*
+             * Someone may already have told us via the event that the
+             * return path is broken.  Hold here until the channel is
+             * rebuilt.
+ */
+ if (postcopy_pause_fault_thread(mis)) {
+ mis->last_rb = NULL;
+ /* Continue to read the userfaultfd */
+ } else {
+                error_report("%s: paused but not allowed to continue",
+ __func__);
+ break;
+ }
+ }
+
if (pfd[1].revents) {
uint64_t tmp64 = 0;
rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
qemu_ram_get_idstr(rb),
- rb_offset);
+ rb_offset,
+ msg.arg.pagefault.feat.ptid);
+ mark_postcopy_blocktime_begin(
+ (uintptr_t)(msg.arg.pagefault.address),
+ msg.arg.pagefault.feat.ptid, rb);
+
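+    /* Re-send point, used once the return path has been re-established */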
+retry:
/*
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
*/
if (rb != mis->last_rb) {
mis->last_rb = rb;
- migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
- rb_offset, qemu_ram_pagesize(rb));
+ ret = migrate_send_rp_req_pages(mis,
+ qemu_ram_get_idstr(rb),
+ rb_offset,
+ qemu_ram_pagesize(rb));
} else {
/* Save some space */
- migrate_send_rp_req_pages(mis, NULL,
- rb_offset, qemu_ram_pagesize(rb));
+ ret = migrate_send_rp_req_pages(mis,
+ NULL,
+ rb_offset,
+ qemu_ram_pagesize(rb));
+ }
+
+ if (ret) {
+ /* May be network failure, try to wait for recovery */
+ if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
+ /* We got reconnected somehow, try to continue */
+ mis->last_rb = NULL;
+ goto retry;
+ } else {
+                    /* This is an unavoidable fault */
+                    error_report("%s: migrate_send_rp_req_pages() returned %d",
+ __func__, ret);
+ break;
+ }
}
}
}
}
}
+ rcu_unregister_thread();
trace_postcopy_ram_fault_thread_exit();
g_free(pfd);
return NULL;
}
-int postcopy_ram_enable_notify(MigrationIncomingState *mis)
+int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
/* Open the fd for the kernel to give us userfaults */
mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
mis->have_fault_thread = true;
/* Mark so that we get notified of accesses to unwritten areas */
- if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+ if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
+ error_report("ram_block_enable_notify failed");
return -1;
}
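+    /*
+     * Temporary page used as a bounce buffer when placing incoming pages
+     * (see postcopy_place_page()).
+     */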
+ mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANONYMOUS, -1, 0);
+ if (mis->postcopy_tmp_page == MAP_FAILED) {
+ mis->postcopy_tmp_page = NULL;
+ error_report("%s: Failed to map postcopy_tmp_page %s",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ /*
+ * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
+ */
+ mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
+ if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
+ int e = errno;
+ mis->postcopy_tmp_zero_page = NULL;
+ error_report("%s: Failed to map large zero page %s",
+ __func__, strerror(e));
+ return -e;
+ }
+ memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
+
/*
* Ballooning can mark pages as absent while we're postcopying
* that would cause false userfaults.
*/
- qemu_balloon_inhibit(true);
+ postcopy_balloon_inhibit(true);
trace_postcopy_ram_enable_notify();
if (!ret) {
ramblock_recv_bitmap_set_range(rb, host_addr,
pagesize / qemu_target_page_size());
+ mark_postcopy_blocktime_end((uintptr_t)host_addr);
+
}
return ret;
}
qemu_ram_block_host_offset(rb,
host));
} else {
- /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
- if (!mis->postcopy_tmp_zero_page) {
- mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0);
- if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
- int e = errno;
- mis->postcopy_tmp_zero_page = NULL;
- error_report("%s: %s mapping large zero page",
- __func__, strerror(e));
- return -e;
- }
- memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
- }
- return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
- rb);
+ return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
}
}
-/*
- * Returns a target page of memory that can be mapped at a later point in time
- * using postcopy_place_page
- * The same address is used repeatedly, postcopy_place_page just takes the
- * backing page away.
- * Returns: Pointer to allocated page
- *
- */
-void *postcopy_get_tmp_page(MigrationIncomingState *mis)
+#else
+/* No target OS support, stubs just fail */
+void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
- if (!mis->postcopy_tmp_page) {
- mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
- PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_ANONYMOUS, -1, 0);
- if (mis->postcopy_tmp_page == MAP_FAILED) {
- mis->postcopy_tmp_page = NULL;
- error_report("%s: %s", __func__, strerror(errno));
- return NULL;
- }
- }
-
- return mis->postcopy_tmp_page;
}
-#else
-/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
error_report("%s: No OS support", __func__);
return false;
}
-int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
+int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
error_report("postcopy_ram_incoming_init: No OS support");
return -1;
return -1;
}
-int postcopy_ram_enable_notify(MigrationIncomingState *mis)
+int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
assert(0);
return -1;
return -1;
}
-void *postcopy_get_tmp_page(MigrationIncomingState *mis)
-{
- assert(0);
- return NULL;
-}
-
int postcopy_wake_shared(struct PostCopyFD *pcfd,
uint64_t client_addr,
RAMBlock *rb)
* asking to discard individual ranges.
*
* @ms: The current migration state.
- * @offset: the bitmap offset of the named RAMBlock in the migration
- * bitmap.
+ * @offset: the bitmap offset of the named RAMBlock in the migration bitmap.
* @name: RAMBlock that discards will operate on.
- *
- * returns: a new PDS.
*/
-PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
- const char *name)
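+/*
+ * Discard state for the RAMBlock currently being processed; reset by
+ * postcopy_discard_send_init() and flushed by postcopy_discard_send_finish().
+ */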
+static PostcopyDiscardState pds = {0};
+void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
- PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));
-
- if (res) {
- res->ramblock_name = name;
- }
-
- return res;
+ pds.ramblock_name = name;
+ pds.cur_entry = 0;
+ pds.nsentwords = 0;
+ pds.nsentcmds = 0;
}
/**
* be sent later.
*
* @ms: Current migration state.
- * @pds: Structure initialised by postcopy_discard_send_init().
* @start,@length: a range of pages in the migration bitmap in the
* RAM block passed to postcopy_discard_send_init() (length=1 is one page)
*/
-void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
- unsigned long start, unsigned long length)
+void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
+ unsigned long length)
{
size_t tp_size = qemu_target_page_size();
/* Convert to byte offsets within the RAM block */
- pds->start_list[pds->cur_entry] = start * tp_size;
- pds->length_list[pds->cur_entry] = length * tp_size;
- trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
- pds->cur_entry++;
- pds->nsentwords++;
+ pds.start_list[pds.cur_entry] = start * tp_size;
+ pds.length_list[pds.cur_entry] = length * tp_size;
+ trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
+ pds.cur_entry++;
+ pds.nsentwords++;
- if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
+ if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
/* Full set, ship it! */
qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
- pds->ramblock_name,
- pds->cur_entry,
- pds->start_list,
- pds->length_list);
- pds->nsentcmds++;
- pds->cur_entry = 0;
+ pds.ramblock_name,
+ pds.cur_entry,
+ pds.start_list,
+ pds.length_list);
+ pds.nsentcmds++;
+ pds.cur_entry = 0;
}
}
- * bitmap code. Sends any outstanding discard messages, frees the PDS
+ * bitmap code. Sends any outstanding discard messages.
*
* @ms: Current migration state.
- * @pds: Structure initialised by postcopy_discard_send_init().
*/
-void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
+void postcopy_discard_send_finish(MigrationState *ms)
{
/* Anything unsent? */
- if (pds->cur_entry) {
+ if (pds.cur_entry) {
qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
- pds->ramblock_name,
- pds->cur_entry,
- pds->start_list,
- pds->length_list);
- pds->nsentcmds++;
+ pds.ramblock_name,
+ pds.cur_entry,
+ pds.start_list,
+ pds.length_list);
+ pds.nsentcmds++;
}
- trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
- pds->nsentcmds);
-
- g_free(pds);
+ trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
+ pds.nsentcmds);
}
/*