uint64_t unused2[4]; /* Reserved for future use */
} __attribute__((packed)) MultiFDInit_t;
+/* Multifd without compression */
+
+/**
+ * nocomp_send_setup: setup send side
+ *
+ * For no compression this function does nothing: there is no per-channel
+ * state to allocate.  It exists only to satisfy the MultiFDMethods
+ * interface.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int nocomp_send_setup(MultiFDSendParams *p, Error **errp)
+{
+    return 0;
+}
+
+/**
+ * nocomp_send_cleanup: cleanup send side
+ *
+ * For no compression this function does nothing: nothing was allocated
+ * in nocomp_send_setup(), so there is nothing to release.
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
+{
+}
+
+/**
+ * nocomp_send_prepare: prepare data to be able to send
+ *
+ * For no compression we just have to calculate the size of the
+ * packet.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @used: number of pages used
+ * @errp: pointer to an error
+ */
+static int nocomp_send_prepare(MultiFDSendParams *p, uint32_t used,
+                               Error **errp)
+{
+    /* pages go on the wire uncompressed, so size is pages * page size */
+    p->next_packet_size = used * qemu_target_page_size();
+    /* tag the packet so the receiver can verify the compression method */
+    p->flags |= MULTIFD_FLAG_NOCOMP;
+    return 0;
+}
+
+/**
+ * nocomp_send_write: do the actual write of the data
+ *
+ * For no compression we just have to write the data: the iovec prepared
+ * in p->pages->iov is sent to the channel as-is.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @used: number of pages used
+ * @errp: pointer to an error
+ */
+static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
+{
+    return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
+}
+
+/**
+ * nocomp_recv_setup: setup receive side
+ *
+ * For no compression this function does nothing: there is no per-channel
+ * state to allocate.  It exists only to satisfy the MultiFDMethods
+ * interface.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int nocomp_recv_setup(MultiFDRecvParams *p, Error **errp)
+{
+    return 0;
+}
+
+/**
+ * nocomp_recv_cleanup: cleanup receive side
+ *
+ * For no compression this function does nothing: nothing was allocated
+ * in nocomp_recv_setup(), so there is nothing to release.
+ *
+ * @p: Params for the channel that we are using
+ */
+static void nocomp_recv_cleanup(MultiFDRecvParams *p)
+{
+}
+
+/**
+ * nocomp_recv_pages: read the data from the channel into actual pages
+ *
+ * For no compression we just need to read things into the correct place.
+ * The compression bits of the packet flags are checked first: a mismatch
+ * means sender and receiver disagree on the multifd compression method.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @used: number of pages used
+ * @errp: pointer to an error
+ */
+static int nocomp_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
+{
+    uint32_t compression = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+
+    if (compression == MULTIFD_FLAG_NOCOMP) {
+        /* data arrives uncompressed: read it straight into the pages */
+        return qio_channel_readv_all(p->c, p->pages->iov, used, errp);
+    }
+    error_setg(errp, "multifd %d: flags received %x flags expected %x",
+               p->id, compression, MULTIFD_FLAG_NOCOMP);
+    return -1;
+}
+
+/* MultiFDMethods implementation for the no-compression case */
+static MultiFDMethods multifd_nocomp_ops = {
+    .send_setup = nocomp_send_setup,
+    .send_cleanup = nocomp_send_cleanup,
+    .send_prepare = nocomp_send_prepare,
+    .send_write = nocomp_send_write,
+    .recv_setup = nocomp_recv_setup,
+    .recv_cleanup = nocomp_recv_cleanup,
+    .recv_pages = nocomp_recv_pages
+};
+
+/*
+ * Method table indexed by MultiFDCompression.  NONE is registered
+ * statically here; other methods register themselves at init time via
+ * multifd_register_ops().
+ */
+static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
+    [MULTIFD_COMPRESSION_NONE] = &multifd_nocomp_ops,
+};
+
+/*
+ * multifd_register_ops: register a compression method's operations
+ *
+ * @method: a MultiFDCompression value; slot 0 (NONE) is filled statically
+ *          above, so the assert deliberately rejects overriding it.
+ * @ops: table of callbacks for that method
+ */
+void multifd_register_ops(int method, MultiFDMethods *ops)
+{
+    assert(0 < method && method < MULTIFD_COMPRESSION__MAX);
+    multifd_ops[method] = ops;
+}
+
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
MultiFDInit_t msg = {};
* We will use atomic operations. Only valid values are 0 and 1.
*/
int exiting;
+ /* multifd ops */
+ MultiFDMethods *ops;
} *multifd_send_state;
/*
}
for (i = 0; i < migrate_multifd_channels(); i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
+ Error *local_err = NULL;
socket_send_channel_destroy(p->c);
p->c = NULL;
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
+ multifd_send_state->ops->send_cleanup(p, &local_err);
+ if (local_err) {
+ migrate_set_error(migrate_get_current(), local_err);
+ }
}
qemu_sem_destroy(&multifd_send_state->channels_ready);
g_free(multifd_send_state->params);
uint64_t packet_num = p->packet_num;
flags = p->flags;
- p->next_packet_size = used * qemu_target_page_size();
+ if (used) {
+ ret = multifd_send_state->ops->send_prepare(p, used,
+ &local_err);
+ if (ret != 0) {
+ qemu_mutex_unlock(&p->mutex);
+ break;
+ }
+ }
multifd_send_fill_packet(p);
p->flags = 0;
p->num_packets++;
}
if (used) {
- ret = qio_channel_writev_all(p->c, p->pages->iov,
- used, &local_err);
+ ret = multifd_send_state->ops->send_write(p, used, &local_err);
if (ret != 0) {
break;
}
multifd_send_state->pages = multifd_pages_init(page_count);
qemu_sem_init(&multifd_send_state->channels_ready, 0);
atomic_set(&multifd_send_state->exiting, 0);
+ multifd_send_state->ops = multifd_ops[migrate_multifd_compression()];
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
p->name = g_strdup_printf("multifdsend_%d", i);
socket_send_channel_create(multifd_new_send_channel_async, p);
}
+
+ for (i = 0; i < thread_count; i++) {
+ MultiFDSendParams *p = &multifd_send_state->params[i];
+ Error *local_err = NULL;
+ int ret;
+
+ ret = multifd_send_state->ops->send_setup(p, &local_err);
+ if (ret) {
+ error_propagate(errp, local_err);
+ return ret;
+ }
+ }
return 0;
}
QemuSemaphore sem_sync;
/* global number of generated multifd packets */
uint64_t packet_num;
+ /* multifd ops */
+ MultiFDMethods *ops;
} *multifd_recv_state;
static void multifd_recv_terminate_threads(Error *err)
int multifd_load_cleanup(Error **errp)
{
int i;
- int ret = 0;
if (!migrate_use_multifd()) {
return 0;
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
+ multifd_recv_state->ops->recv_cleanup(p);
}
qemu_sem_destroy(&multifd_recv_state->sem_sync);
g_free(multifd_recv_state->params);
g_free(multifd_recv_state);
multifd_recv_state = NULL;
- return ret;
+ return 0;
}
void multifd_recv_sync_main(void)
used = p->pages->used;
flags = p->flags;
+ /* recv methods don't know how to handle the SYNC flag */
+ p->flags &= ~MULTIFD_FLAG_SYNC;
trace_multifd_recv(p->id, p->packet_num, used, flags,
p->next_packet_size);
p->num_packets++;
qemu_mutex_unlock(&p->mutex);
if (used) {
- ret = qio_channel_readv_all(p->c, p->pages->iov,
- used, &local_err);
+ ret = multifd_recv_state->ops->recv_pages(p, used, &local_err);
if (ret != 0) {
break;
}
multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
atomic_set(&multifd_recv_state->count, 0);
qemu_sem_init(&multifd_recv_state->sem_sync, 0);
+ multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];
for (i = 0; i < thread_count; i++) {
MultiFDRecvParams *p = &multifd_recv_state->params[i];
p->packet = g_malloc0(p->packet_len);
p->name = g_strdup_printf("multifdrecv_%d", i);
}
+
+ for (i = 0; i < thread_count; i++) {
+ MultiFDRecvParams *p = &multifd_recv_state->params[i];
+ Error *local_err = NULL;
+ int ret;
+
+ ret = multifd_recv_state->ops->recv_setup(p, &local_err);
+ if (ret) {
+ error_propagate(errp, local_err);
+ return ret;
+ }
+ }
return 0;
}
return atomic_read(&multifd_recv_state->count) ==
migrate_multifd_channels();
}
-