/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "exec/cpu-common.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "block/coroutine.h"
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <string.h>
#include <rdma/rdma_cma.h>
#include "trace.h"

/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)

#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */

/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size infiniband SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;

#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                error_report("RDMA is in an error state waiting for" \
                             " migration to abort!"); \
                rdma->error_reported = 1; \
            } \
            return rdma->error_state; \
        } \
    } while (0)

/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL

#define RDMA_WRID_TYPE_MASK \
    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)

#define RDMA_WRID_BLOCK_MASK \
    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))

#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
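
/*
 * Illustration only (not part of the protocol code): a minimal self-check of
 * how the shift/mask constants above compose and decompose a 64-bit work
 * request ID. The guard symbol RDMA_WRID_LAYOUT_EXAMPLE is invented for this
 * sketch and never defined; the packing mirrors qemu_rdma_make_wrid() and the
 * decoding done on completions in qemu_rdma_poll() further below.
 */
#ifdef RDMA_WRID_LAYOUT_EXAMPLE
static void rdma_wrid_layout_example(void)
{
    uint64_t type = 1;   /* e.g. RDMA_WRID_RDMA_WRITE, defined below */
    uint64_t block = 3;  /* ram block index, fits in bits 16-29 */
    uint64_t chunk = 42; /* chunk number, fits in bits 30-63 */
    uint64_t wr_id = (type << RDMA_WRID_TYPE_SHIFT) |
                     (block << RDMA_WRID_BLOCK_SHIFT) |
                     (chunk << RDMA_WRID_CHUNK_SHIFT);

    /* Decomposition recovers each field independently. */
    assert(((wr_id & RDMA_WRID_TYPE_MASK) >> RDMA_WRID_TYPE_SHIFT) == type);
    assert(((wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT) == block);
    assert(((wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT) == chunk);
}
#endif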

/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};

const char *wrid_desc[] = {
    [RDMA_WRID_NONE] = "NONE",
    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
};

/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands).
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};

/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};

const char *control_desc[] = {
    [RDMA_CONTROL_NONE] = "NONE",
    [RDMA_CONTROL_ERROR] = "ERROR",
    [RDMA_CONTROL_READY] = "READY",
    [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
    [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
    [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
    [RDMA_CONTROL_COMPRESS] = "COMPRESS",
    [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
    [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
    [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
    [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
    [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
};

/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;

/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
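
/*
 * Illustration only: the round trip taken by the capability structure above.
 * The guard symbol RDMA_CAPS_EXAMPLE is invented for this sketch and never
 * defined; the real conversions happen around connection setup.
 */
#ifdef RDMA_CAPS_EXAMPLE
static void rdma_caps_example(void)
{
    RDMACapabilities cap = {
        .version = RDMA_CONTROL_VERSION_CURRENT,
        .flags = RDMA_CAPABILITY_PIN_ALL,
    };

    caps_to_network(&cap);  /* sender: convert to network byte order */
    network_to_caps(&cap);  /* receiver: convert back to host order */
    assert(cap.version == RDMA_CONTROL_VERSION_CURRENT);
}
#endif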

/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    uint8_t  *local_host_addr;  /* local virtual address */
    uint64_t remote_host_addr;  /* remote virtual address */
    uint64_t offset;
    uint64_t length;
    struct   ibv_mr **pmr;      /* MRs for chunk-level registration */
    struct   ibv_mr *mr;        /* MR for non-chunk-level registration */
    uint32_t *remote_keys;      /* rkeys for chunk-level registration */
    uint32_t remote_rkey;       /* rkey for non-chunk-level registration */
    int      index;             /* which block are we */
    bool     is_ram_block;
    int      nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;

/*
 * Also represents a RAMBlock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMARemoteBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMARemoteBlock;

static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
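
/*
 * Illustration only: a round-trip check for the 64-bit byte-order helpers
 * above; the high 32 bits travel first on the wire. The guard symbol
 * RDMA_BYTEORDER_EXAMPLE is invented for this sketch and never defined.
 */
#ifdef RDMA_BYTEORDER_EXAMPLE
static void rdma_byteorder_example(void)
{
    uint64_t host = 0x0102030405060708ULL;

    /* htonll()/ntohll() are inverses regardless of host endianness. */
    assert(ntohll(htonll(host)) == host);
}
#endif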

static void remote_block_to_network(RDMARemoteBlock *rb)
{
    rb->remote_host_addr = htonll(rb->remote_host_addr);
    rb->offset = htonll(rb->offset);
    rb->length = htonll(rb->length);
    rb->remote_rkey = htonl(rb->remote_rkey);
}

static void network_to_remote_block(RDMARemoteBlock *rb)
{
    rb->remote_host_addr = ntohll(rb->remote_host_addr);
    rb->offset = ntohll(rb->offset);
    rb->length = ntohll(rb->length);
    rb->remote_rkey = ntohl(rb->remote_rkey);
}

/*
 * Container for the local descriptions of the above structures used for
 * transmitting the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool init;             /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;

/*
 * Main data structure for RDMA state.
 * While there is only one copy of this structure being allocated right now,
 * this is the place where one would start if one wanted to consider
 * having more than one RDMA connection open at the same time.
 */
typedef struct RDMAContext {
    char *host;
    int port;

    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];

    /*
     * This is used by *_exchange_send() to figure out whether or not
     * the initial "READY" message has already been received, because
     * other functions may potentially poll() and detect the READY
     * message before send() does, in which case we need to know if it
     * completed.
     */
    int control_ready_expected;

    /* number of outstanding writes */
    int nb_sent;

    /* store info about current buffer so that we can
       merge it with future sends */
    uint64_t current_addr;
    uint64_t current_length;
    /* index of ram block the current buffer belongs to */
    int current_index;
    /* index of the chunk in the current ram block */
    int current_chunk;

    bool pin_all;

    /*
     * infiniband-specific variables for opening the device
     * and maintaining connection state and so forth.
     *
     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
     * cm_id->verbs, cm_id->channel, and cm_id->qp.
     */
    struct rdma_cm_id *cm_id;              /* connection manager ID */
    struct rdma_cm_id *listen_id;
    bool connected;

    struct ibv_context *verbs;
    struct rdma_event_channel *channel;
    struct ibv_qp *qp;                     /* queue pair */
    struct ibv_comp_channel *comp_channel; /* completion channel */
    struct ibv_pd *pd;                     /* protection domain */
    struct ibv_cq *cq;                     /* completion queue */

    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    int error_reported;

    /*
     * Description of ram blocks used throughout the code.
     */
    RDMALocalBlocks local_ram_blocks;
    RDMARemoteBlock *block;

    /*
     * Migration on *destination* started.
     * Then use coroutine yield function.
     * Source runs in a thread, so we don't care.
     */
    int migration_started_on_destination;

    int total_registrations;
    int total_writes;

    int unregister_current, unregister_next;
    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];

    GHashTable *blockmap;
} RDMAContext;

/*
 * Interface to the rest of the migration call stack.
 */
typedef struct QEMUFileRDMA {
    RDMAContext *rdma;
    size_t len;
    void *file;
} QEMUFileRDMA;

/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;

static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
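
/*
 * Illustration only: how a control header is prepared before being copied
 * into a registered send buffer, mirroring qemu_rdma_post_send_control()
 * below. The guard symbol RDMA_CONTROL_HEADER_EXAMPLE is invented for this
 * sketch and never defined.
 */
#ifdef RDMA_CONTROL_HEADER_EXAMPLE
static void rdma_control_header_example(uint8_t *send_buf)
{
    RDMAControlHeader head = {
        .len = 0,                   /* no data portion follows */
        .type = RDMA_CONTROL_READY,
        .repeat = 1,
    };

    /* Copy first, then convert the copy in place to network byte order. */
    memcpy(send_buf, &head, sizeof(head));
    control_to_network((void *) send_buf);
}
#endif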

/*
 * Register a single Chunk.
 * Information sent by the source VM to inform the dest
 * to register a single chunk of memory before we can perform
 * the actual RDMA operation.
 */
typedef struct QEMU_PACKED {
    union QEMU_PACKED {
        uint64_t current_addr;  /* offset into the ramblock of the chunk */
        uint64_t chunk;         /* chunk to lookup if unregistering */
    } key;
    uint32_t current_index;     /* which ramblock the chunk belongs to */
    uint32_t padding;
    uint64_t chunks;            /* how many sequential chunks to register */
} RDMARegister;

static void register_to_network(RDMARegister *reg)
{
    reg->key.current_addr = htonll(reg->key.current_addr);
    reg->current_index = htonl(reg->current_index);
    reg->chunks = htonll(reg->chunks);
}

static void network_to_register(RDMARegister *reg)
{
    reg->key.current_addr = ntohll(reg->key.current_addr);
    reg->current_index = ntohl(reg->current_index);
    reg->chunks = ntohll(reg->chunks);
}
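
/*
 * Illustration only: filling a dynamic-registration request the way
 * qemu_rdma_write_one() does before handing it to qemu_rdma_exchange_send().
 * The union key carries an address when registering and a chunk number when
 * unregistering. The guard symbol RDMA_REGISTER_EXAMPLE is invented for this
 * sketch and never defined.
 */
#ifdef RDMA_REGISTER_EXAMPLE
static void rdma_register_example(uint64_t current_addr, uint32_t index)
{
    RDMARegister reg = {
        .key.current_addr = current_addr, /* offset of chunk into ramblock */
        .current_index = index,           /* which ramblock */
        .chunks = 1,                      /* register one chunk */
    };

    register_to_network(&reg); /* wire byte order; the peer undoes this with
                                  network_to_register() before use */
}
#endif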

typedef struct QEMU_PACKED {
    uint32_t value;     /* if zero, we will madvise() */
    uint32_t block_idx; /* which ram block index */
    uint64_t offset;    /* where in the remote ramblock this chunk */
    uint64_t length;    /* length of the chunk */
} RDMACompress;

static void compress_to_network(RDMACompress *comp)
{
    comp->value = htonl(comp->value);
    comp->block_idx = htonl(comp->block_idx);
    comp->offset = htonll(comp->offset);
    comp->length = htonll(comp->length);
}

static void network_to_compress(RDMACompress *comp)
{
    comp->value = ntohl(comp->value);
    comp->block_idx = ntohl(comp->block_idx);
    comp->offset = ntohll(comp->offset);
    comp->length = ntohll(comp->length);
}
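
/*
 * Illustration only: a zero-page notification as built by
 * qemu_rdma_write_one() when an unregistered chunk turns out to be all
 * zeroes, telling the destination to fill the range locally instead of
 * transferring it. The guard symbol RDMA_COMPRESS_EXAMPLE is invented for
 * this sketch and never defined.
 */
#ifdef RDMA_COMPRESS_EXAMPLE
static void rdma_compress_example(uint64_t offset, uint32_t block_idx,
                                  uint64_t length)
{
    RDMACompress comp = {
        .value = 0,             /* zero: destination may madvise() the range */
        .block_idx = block_idx, /* which ram block */
        .offset = offset,       /* where in the remote ramblock */
        .length = length,       /* length of the chunk */
    };

    compress_to_network(&comp); /* convert to wire byte order before send */
}
#endif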

/*
 * The result of the dest's memory registration produces an "rkey"
 * which the source VM must reference in order to perform
 * the RDMA operation.
 */
typedef struct QEMU_PACKED {
    uint32_t rkey;
    uint32_t padding;
    uint64_t host_addr;
} RDMARegisterResult;

static void result_to_network(RDMARegisterResult *result)
{
    result->rkey = htonl(result->rkey);
    result->host_addr = htonll(result->host_addr);
}

static void network_to_result(RDMARegisterResult *result)
{
    result->rkey = ntohl(result->rkey);
    result->host_addr = ntohll(result->host_addr);
}

const char *print_wrid(int wrid);
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma));

static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}

static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
                                    + (i << RDMA_REG_CHUNK_SHIFT));
}

static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}
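
/*
 * Illustration only: locating the chunk that covers a host address using the
 * three helpers above. The guard symbol RDMA_CHUNK_MATH_EXAMPLE is invented
 * for this sketch and never defined.
 */
#ifdef RDMA_CHUNK_MATH_EXAMPLE
static void rdma_chunk_math_example(const RDMALocalBlock *block, uint8_t *host)
{
    uint64_t chunk = ram_chunk_index(block->local_host_addr, host);
    uint8_t *start = ram_chunk_start(block, chunk);
    uint8_t *end = ram_chunk_end(block, chunk);

    /* Chunks are 1UL << RDMA_REG_CHUNK_SHIFT (1 MB) wide, except that the
     * last chunk is clamped to the end of the block. */
    assert(host >= start && host < end);
}
#endif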

static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr,
                         ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
        (void *) block_offset);
    RDMALocalBlock *old = local->block;

    assert(block == NULL);

    local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));

    if (local->nb_blocks) {
        int x;

        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
            g_hash_table_insert(rdma->blockmap, (void *)old[x].offset,
                                                &local->block[x]);
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));

    block->is_ram_block = local->init ? false : true;

    g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);

    trace___qemu_rdma_add_block(local->nb_blocks,
                           (uint64_t) block->local_host_addr, block->offset,
                           block->length,
                           (uint64_t) (block->local_host_addr + block->length),
                           BITS_TO_LONGS(block->nb_chunks) *
                               sizeof(unsigned long) * 8,
                           block->nb_chunks);

    local->nb_blocks++;

    return 0;
}

/*
 * Memory regions need to be registered with the device and queue pairs set up
 * in advance before the migration starts. This tells us where the RAM blocks
 * are so that we can register them individually.
 */
static void qemu_rdma_init_one_block(void *host_addr,
    ram_addr_t block_offset, ram_addr_t length, void *opaque)
{
    __qemu_rdma_add_block(opaque, host_addr, block_offset, length);
}

/*
 * Identify the RAMBlocks and their quantity. They will be referenced to
 * identify chunk boundaries inside each RAMBlock and also be referenced
 * during dynamic page registration.
 */
static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    assert(rdma->blockmap == NULL);
    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
    memset(local, 0, sizeof *local);
    qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
    trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
    rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
                    rdma->local_ram_blocks.nb_blocks);
    local->init = true;
    return 0;
}

static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
        (void *) block_offset);
    RDMALocalBlock *old = local->block;
    int x;

    assert(block);

    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    for (x = 0; x < local->nb_blocks; x++) {
        g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
    }

    if (local->nb_blocks > 1) {

        local->block = g_malloc0(sizeof(RDMALocalBlock) *
                                    (local->nb_blocks - 1));

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                sizeof(RDMALocalBlock) *
                    (local->nb_blocks - (block->index + 1)));
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    trace___qemu_rdma_delete_block(local->nb_blocks,
                           (uint64_t)block->local_host_addr,
                           block->offset, block->length,
                           (uint64_t)(block->local_host_addr + block->length),
                           BITS_TO_LONGS(block->nb_chunks) *
                               sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset,
                                                &local->block[x]);
        }
    }

    return 0;
}

/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        error_report("Failed to query port information");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}

/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    trace_qemu_rdma_dump_gid(who, sgid, dgid);
}

/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If the user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 * If the source VM connects with an IPv4 address without knowing that the
 * destination has bound to '[::]' the migration will unconditionally fail
 * unless the management software is explicitly listening on the IPv4
 * address while using a RoCE-based device.
 *
 * If the source VM connects with an IPv6 address, then we're OK because we can
 * throw an error on the source (and similarly on the destination).
 *
 * But in mixed environments, this will be broken for a while until it is fixed
 * inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
{
    struct ibv_port_attr port_attr;

    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there are any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);
        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that something other than '[::]'
     * was used by the management software for binding. In which case we can
     * actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}

/*
 * Figure out which RDMA device corresponds to the requested IP hostname.
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}

/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        error_report("failed to allocate completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        error_report("failed to allocate completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;
}

/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->cq;
    attr.recv_cq = rdma->cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}

static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                    local->block[i].local_host_addr,
                    local->block[i].length,
                    IBV_ACCESS_LOCAL_WRITE |
                    IBV_ACCESS_REMOTE_WRITE
                    );
        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        rdma->total_registrations--;
    }

    return -1;
}

/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block that the page belongs to.
 *
 * This search cannot fail or the migration will fail.
 */
static int qemu_rdma_search_ram_block(RDMAContext *rdma,
                                      uint64_t block_offset,
                                      uint64_t offset,
                                      uint64_t length,
                                      uint64_t *block_index,
                                      uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
        (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));

    return 0;
}

/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
        RDMALocalBlock *block, uint8_t *host_addr,
        uint32_t *lkey, uint32_t *rkey, int chunk,
        uint8_t *chunk_start, uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;

        trace_qemu_rdma_register_and_get_keys(len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
                chunk_start, len,
                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE) : 0));

        if (!block->pmr[chunk]) {
            perror("Failed to register chunk!");
            fprintf(stderr, "Chunk details: block: %d chunk index %d"
                            " start %" PRIu64 " end %" PRIu64 " host %" PRIu64
                            " local %" PRIu64 " registrations: %d\n",
                            block->index, chunk, (uint64_t) chunk_start,
                            (uint64_t) chunk_end, (uint64_t) host_addr,
                            (uint64_t) block->local_host_addr,
                            rdma->total_registrations);
            return -1;
        }
        rdma->total_registrations++;
    }

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}

/*
 * Register (at connection time) the memory used for control
 * channel messages.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    error_report("qemu_rdma_reg_control failed");
    return -1;
}

const char *print_wrid(int wrid)
{
    if (wrid >= RDMA_WRID_RECV_CONTROL) {
        return wrid_desc[RDMA_WRID_RECV_CONTROL];
    }
    return wrid_desc[wrid];
}

/*
 * RDMA requires memory registration (mlock/pinning), but this is not good for
 * overcommitment.
 *
 * In preparation for the future where LRU information or workload-specific
 * writable working set memory access behavior is available to QEMU
 * it would be nice to have in place the ability to UN-register/UN-pin
 * particular memory regions from the RDMA hardware when it is determined that
 * those regions of memory will likely not be accessed again in the near
 * future.
 *
 * While we do not yet have such information right now, the following
 * compile-time option allows us to perform a non-optimized version of this
 * behavior.
 *
 * By uncommenting this option, you will cause *all* RDMA transfers to be
 * unregistered immediately after the transfer completes on both sides of the
 * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
 *
 * This will have a terrible impact on migration performance, so until future
 * workload information or LRU information is available, do not attempt to use
 * this feature except for basic testing.
 */
//#define RDMA_UNREGISTRATION_EXAMPLE

/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
 *      - for bit clearing
 *      - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        trace_qemu_rdma_unregister_waiting_proc(chunk,
                                                rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }

        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's InfiniBand message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */
        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            trace_qemu_rdma_unregister_waiting_inflight(chunk);
            continue;
        }

        trace_qemu_rdma_unregister_waiting_send(chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(&reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        trace_qemu_rdma_unregister_waiting_complete(chunk);
    }

    return 0;
}

static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}

/*
 * Set bit for unregistration in the next iteration.
 * We cannot transmit right here, but will unpin later.
 */
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
                                        uint64_t chunk, uint64_t wr_id)
{
    if (rdma->unregistrations[rdma->unregister_next] != 0) {
        error_report("rdma migration: queue is full");
    } else {
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
            trace_qemu_rdma_signal_unregister_append(chunk,
                                                     rdma->unregister_next);

            rdma->unregistrations[rdma->unregister_next++] =
                    qemu_rdma_make_wrid(wr_id, index, chunk);

            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
                rdma->unregister_next = 0;
            }
        } else {
            trace_qemu_rdma_signal_unregister_already(chunk);
        }
    }
}

/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * Return the work request ID that completed.
 */
static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
                               uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(rdma->cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        error_report("ibv_poll_cq return %d", ret);
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL],
                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent,
                 index, chunk,
                 block->local_host_addr, (void *)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }

        if (!rdma->pin_all) {
            /*
             * FYI: If one wanted to signal a specific chunk to be unregistered
             * using LRU or workload-specific information, this is the function
             * you would call to do so. That chunk would then get asynchronously
             * unregistered later.
             */
#ifdef RDMA_UNREGISTRATION_EXAMPLE
            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
#endif
        }
    } else {
        trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}

/*
 * Block until the next work request has completed.
 *
 * First poll to see if a work request has already completed,
 * otherwise block.
 *
 * If we encounter completed work requests for IDs other than
 * the one we're interested in, then that's generally an error.
 *
 * The only exception is actual RDMA Write completions. These
 * completions only need to be recorded, but do not actually
 * need further processing.
 */
static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
                                    uint32_t *byte_len)
{
    int num_cq_events = 0, ret = 0;
    struct ibv_cq *cq;
    void *cq_ctx;
    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;

    if (ibv_req_notify_cq(rdma->cq, 0)) {
        return -1;
    }
    /* poll cq first */
    while (wr_id != wrid_requested) {
        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
        if (ret < 0) {
            return ret;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
        if (wr_id != wrid_requested) {
            trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                       wrid_requested, print_wrid(wr_id), wr_id);
        }
    }

    if (wr_id == wrid_requested) {
        return 0;
    }

    while (1) {
        /*
         * Coroutine doesn't start until process_incoming_migration()
         * so don't yield unless we know we're running inside of a coroutine.
         */
        if (rdma->migration_started_on_destination) {
            yield_until_fd_readable(rdma->comp_channel->fd);
        }

        if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) {
            perror("ibv_get_cq_event");
            goto err_block_for_wrid;
        }

        num_cq_events++;

        if (ibv_req_notify_cq(cq, 0)) {
            goto err_block_for_wrid;
        }

        while (wr_id != wrid_requested) {
            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
            if (ret < 0) {
                goto err_block_for_wrid;
            }

            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

            if (wr_id == RDMA_WRID_NONE) {
                break;
            }
            if (wr_id != wrid_requested) {
                trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                                   wrid_requested, print_wrid(wr_id), wr_id);
            }
        }

        if (wr_id == wrid_requested) {
            goto success_block_for_wrid;
        }
    }

success_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return 0;

err_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return ret;
}

/*
 * Post a SEND message work request for the control channel
 * containing some data and block until the post completes.
 */
static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
                                       RDMAControlHeader *head)
{
    int ret = 0;
    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
    struct ibv_send_wr *bad_wr;
    struct ibv_sge sge = {
                           .addr = (uint64_t)(wr->control),
                           .length = head->len + sizeof(RDMAControlHeader),
                           .lkey = wr->control_mr->lkey,
                         };
    struct ibv_send_wr send_wr = {
                                   .wr_id = RDMA_WRID_SEND_CONTROL,
                                   .opcode = IBV_WR_SEND,
                                   .send_flags = IBV_SEND_SIGNALED,
                                   .sg_list = &sge,
                                   .num_sge = 1,
                                 };

    trace_qemu_rdma_post_send_control(control_desc[head->type]);

    /*
     * We don't actually need to do a memcpy() in here if we used
     * the "sge" properly, but since we're only sending control messages
     * (not RAM in a performance-critical path), then it's OK for now.
     *
     * The copy makes the RDMAControlHeader simpler to manipulate
     * for the time being.
     */
    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
    memcpy(wr->control, head, sizeof(RDMAControlHeader));
    control_to_network((void *) wr->control);

    if (buf) {
        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
    }

    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret > 0) {
        error_report("Failed to post IB SEND for control");
        return -ret;
    }

    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
    if (ret < 0) {
        error_report("rdma migration: send polling control error");
    }

    return ret;
}

/*
 * Post a RECV work request in anticipation of some future receipt
 * of data on the control channel.
 */
static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
{
    struct ibv_recv_wr *bad_wr;
    struct ibv_sge sge = {
                            .addr = (uint64_t)(rdma->wr_data[idx].control),
                            .length = RDMA_CONTROL_MAX_BUFFER,
                            .lkey = rdma->wr_data[idx].control_mr->lkey,
                         };

    struct ibv_recv_wr recv_wr = {
                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
                                    .sg_list = &sge,
                                    .num_sge = 1,
                                 };

    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
        return -1;
    }

    return 0;
}

/*
 * Block and wait for a RECV control channel message to arrive.
 */
static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
                RDMAControlHeader *head, int expecting, int idx)
{
    uint32_t byte_len;
    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
                                       &byte_len);

    if (ret < 0) {
        error_report("rdma migration: recv polling control error!");
        return ret;
    }

    network_to_control((void *) rdma->wr_data[idx].control);
    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));

    trace_qemu_rdma_exchange_get_response_start(control_desc[expecting]);

    if (expecting == RDMA_CONTROL_NONE) {
        trace_qemu_rdma_exchange_get_response_none(control_desc[head->type],
                                                   head->type);
    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
        error_report("Was expecting a %s (%d) control message"
                ", but got: %s (%d), length: %d",
                control_desc[expecting], expecting,
                control_desc[head->type], head->type, head->len);
        return -EIO;
    }
    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
        error_report("too long length: %d", head->len);
        return -EINVAL;
    }
    if (sizeof(*head) + head->len != byte_len) {
        error_report("Malformed length: %d byte_len %d", head->len, byte_len);
        return -EINVAL;
    }

    return 0;
}

/*
 * When a RECV work request has completed, the work request's
 * buffer is pointed at the header.
 *
 * This will advance the pointer to the data portion
 * of the control message of the work request's buffer that
 * was populated after the work request finished.
 */
static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
                                  RDMAControlHeader *head)
{
    rdma->wr_data[idx].control_len = head->len;
    rdma->wr_data[idx].control_curr =
        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
}
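
/*
 * Illustration only: after qemu_rdma_move_header(), the data portion of a
 * received control message can be read directly from control_curr, e.g. a
 * registration result, as qemu_rdma_write_one() does further below. The
 * guard symbol RDMA_MOVE_HEADER_EXAMPLE is invented for this sketch and
 * never defined.
 */
#ifdef RDMA_MOVE_HEADER_EXAMPLE
static void rdma_move_header_example(RDMAContext *rdma, int idx)
{
    RDMARegisterResult *result =
        (RDMARegisterResult *) rdma->wr_data[idx].control_curr;

    network_to_result(result); /* convert from wire byte order; rkey and
                                  host_addr are then usable */
}
#endif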

/*
 * This is an 'atomic' high-level operation to deliver a single, unified
 * control-channel message.
 *
 * Additionally, if the user is expecting some kind of reply to this message,
 * they can request a 'resp' response message be filled in by posting an
 * additional work request on behalf of the user and waiting for an additional
 * completion.
 *
 * The extra (optional) response is used during registration to save us from
 * having to perform an *additional* exchange of messages just to provide a
 * response, by instead piggy-backing on the acknowledgement.
 */
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma))
{
    int ret = 0;

    /*
     * Wait until the dest is ready before attempting to deliver the message
     * by waiting for a READY message.
     */
    if (rdma->control_ready_expected) {
        RDMAControlHeader resp;
        ret = qemu_rdma_exchange_get_response(rdma,
                                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
        if (ret < 0) {
            return ret;
        }
    }

    /*
     * If the user is expecting a response, post a WR in anticipation of it.
     */
    if (resp) {
        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
        if (ret) {
            error_report("rdma migration: error posting"
                    " extra control recv for anticipated result!");
            return ret;
        }
    }

    /*
     * Post a WR to replace the one we just consumed for the READY message.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting first control recv!");
        return ret;
    }

    /*
     * Deliver the control message that was requested.
     */
    ret = qemu_rdma_post_send_control(rdma, data, head);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * If we're expecting a response, block and wait for it.
     */
    if (resp) {
        if (callback) {
            trace_qemu_rdma_exchange_send_issue_callback();
            ret = callback(rdma);
            if (ret < 0) {
                return ret;
            }
        }

        trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]);
        ret = qemu_rdma_exchange_get_response(rdma, resp,
                                              resp->type, RDMA_WRID_DATA);

        if (ret < 0) {
            return ret;
        }

        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
        if (resp_idx) {
            *resp_idx = RDMA_WRID_DATA;
        }
        trace_qemu_rdma_exchange_send_received(control_desc[resp->type]);
    }

    rdma->control_ready_expected = 1;

    return 0;
}

/*
 * This is an 'atomic' high-level operation to receive a single, unified
 * control-channel message.
 */
static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
                                   int expecting)
{
    RDMAControlHeader ready = {
                                .len = 0,
                                .type = RDMA_CONTROL_READY,
                                .repeat = 1,
                              };
    int ret;

    /*
     * Inform the source that we're ready to receive a message.
     */
    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * Block and wait for the message.
     */
    ret = qemu_rdma_exchange_get_response(rdma, head,
                                          expecting, RDMA_WRID_READY);

    if (ret < 0) {
        return ret;
    }

    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);

    /*
     * Post a new RECV work request to replace the one we just consumed.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv!");
        return ret;
    }

    return 0;
}
1800
1801/*
1802 * Write an actual chunk of memory using RDMA.
1803 *
1804 * If we're using dynamic registration on the dest-side, we have to
1805 * send a registration command first.
1806 */
1807static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
1808 int current_index, uint64_t current_addr,
1809 uint64_t length)
1810{
1811 struct ibv_sge sge;
1812 struct ibv_send_wr send_wr = { 0 };
1813 struct ibv_send_wr *bad_wr;
1814 int reg_result_idx, ret, count = 0;
1815 uint64_t chunk, chunks;
1816 uint8_t *chunk_start, *chunk_end;
1817 RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
1818 RDMARegister reg;
1819 RDMARegisterResult *reg_result;
1820 RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
1821 RDMAControlHeader head = { .len = sizeof(RDMARegister),
1822 .type = RDMA_CONTROL_REGISTER_REQUEST,
1823 .repeat = 1,
1824 };
1825
1826retry:
1827 sge.addr = (uint64_t)(block->local_host_addr +
1828 (current_addr - block->offset));
1829 sge.length = length;
1830
1831 chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr);
1832 chunk_start = ram_chunk_start(block, chunk);
1833
1834 if (block->is_ram_block) {
1835 chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
1836
1837 if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
1838 chunks--;
1839 }
1840 } else {
1841 chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
1842
1843 if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
1844 chunks--;
1845 }
1846 }
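    /*
     * Example: with 1 MB chunks (RDMA_REG_CHUNK_SHIFT = 20), an aligned
     * 2 MB length yields chunks = 2, decremented to 1 because 'chunks'
     * counts the chunks beyond the first; the write then covers
     * chunk .. chunk + chunks, i.e. two chunks in total.
     */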
1847
1848 trace_qemu_rdma_write_one_top(chunks + 1,
1849 (chunks + 1) *
1850 (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
1851
1852 chunk_end = ram_chunk_end(block, chunk + chunks);
1853
1854 if (!rdma->pin_all) {
1855#ifdef RDMA_UNREGISTRATION_EXAMPLE
1856 qemu_rdma_unregister_waiting(rdma);
1857#endif
1858 }
1859
1860 while (test_bit(chunk, block->transit_bitmap)) {
1861 (void)count;
733252de 1862 trace_qemu_rdma_write_one_block(count++, current_index, chunk,
1863 sge.addr, length, rdma->nb_sent, block->nb_chunks);
1864
88571882 1865 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
1866
1867 if (ret < 0) {
733252de 1868 error_report("Failed to wait for previous write to complete "
2da776db 1869 "block %d chunk %" PRIu64
733252de 1870 " current %" PRIu64 " len %" PRIu64 " %d",
 1871 current_index, chunk, sge.addr, length, rdma->nb_sent);
1872 return ret;
1873 }
1874 }
1875
1876 if (!rdma->pin_all || !block->is_ram_block) {
1877 if (!block->remote_keys[chunk]) {
1878 /*
1879 * This chunk has not yet been registered, so first check to see
 1880 * if the entire chunk is zero. If so, tell the other side to
1881 * memset() + madvise() the entire chunk without RDMA.
1882 */
1883
1884 if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length)
1885 && buffer_find_nonzero_offset((void *)sge.addr,
1886 length) == length) {
1887 RDMACompress comp = {
1888 .offset = current_addr,
1889 .value = 0,
1890 .block_idx = current_index,
1891 .length = length,
1892 };
1893
1894 head.len = sizeof(comp);
1895 head.type = RDMA_CONTROL_COMPRESS;
1896
1897 trace_qemu_rdma_write_one_zero(chunk, sge.length,
1898 current_index, current_addr);
1899
1900 compress_to_network(&comp);
1901 ret = qemu_rdma_exchange_send(rdma, &head,
1902 (uint8_t *) &comp, NULL, NULL, NULL);
1903
1904 if (ret < 0) {
1905 return -EIO;
1906 }
1907
1908 acct_update_position(f, sge.length, true);
1909
1910 return 1;
1911 }
1912
1913 /*
1914 * Otherwise, tell other side to register.
1915 */
1916 reg.current_index = current_index;
1917 if (block->is_ram_block) {
1918 reg.key.current_addr = current_addr;
1919 } else {
1920 reg.key.chunk = chunk;
1921 }
1922 reg.chunks = chunks;
1923
1924 trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
1925 current_addr);
1926
1927 register_to_network(&reg);
1928 ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
1929 &resp, &reg_result_idx, NULL);
1930 if (ret < 0) {
1931 return ret;
1932 }
1933
1934 /* try to overlap this single registration with the one we sent. */
1935 if (qemu_rdma_register_and_get_keys(rdma, block,
1936 (uint8_t *) sge.addr,
1937 &sge.lkey, NULL, chunk,
1938 chunk_start, chunk_end)) {
733252de 1939 error_report("cannot get lkey");
1940 return -EINVAL;
1941 }
1942
1943 reg_result = (RDMARegisterResult *)
1944 rdma->wr_data[reg_result_idx].control_curr;
1945
1946 network_to_result(reg_result);
1947
1948 trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
1949 reg_result->rkey, chunk);
1950
1951 block->remote_keys[chunk] = reg_result->rkey;
1952 block->remote_host_addr = reg_result->host_addr;
1953 } else {
1954 /* already registered before */
1955 if (qemu_rdma_register_and_get_keys(rdma, block,
1956 (uint8_t *)sge.addr,
1957 &sge.lkey, NULL, chunk,
1958 chunk_start, chunk_end)) {
733252de 1959 error_report("cannot get lkey!");
1960 return -EINVAL;
1961 }
1962 }
1963
1964 send_wr.wr.rdma.rkey = block->remote_keys[chunk];
1965 } else {
1966 send_wr.wr.rdma.rkey = block->remote_rkey;
1967
1968 if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
1969 &sge.lkey, NULL, chunk,
1970 chunk_start, chunk_end)) {
733252de 1971 error_report("cannot get lkey!");
1972 return -EINVAL;
1973 }
1974 }
1975
1976 /*
1977 * Encode the ram block index and chunk within this wrid.
1978 * We will use this information at the time of completion
1979 * to figure out which bitmap to check against and then which
1980 * chunk in the bitmap to look for.
1981 */
1982 send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
1983 current_index, chunk);
1984
1985 send_wr.opcode = IBV_WR_RDMA_WRITE;
1986 send_wr.send_flags = IBV_SEND_SIGNALED;
1987 send_wr.sg_list = &sge;
1988 send_wr.num_sge = 1;
1989 send_wr.wr.rdma.remote_addr = block->remote_host_addr +
1990 (current_addr - block->offset);
1991
1992 trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
1993 sge.length);
1994
1995 /*
1996 * ibv_post_send() does not return negative error numbers,
1997 * per the specification they are positive - no idea why.
1998 */
1999 ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
2000
2001 if (ret == ENOMEM) {
733252de 2002 trace_qemu_rdma_write_one_queue_full();
88571882 2003 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2da776db 2004 if (ret < 0) {
2005 error_report("rdma migration: failed to make "
2006 "room in full send queue! %d", ret);
2007 return ret;
2008 }
2009
2010 goto retry;
2011
2012 } else if (ret > 0) {
2013 perror("rdma migration: post rdma write failed");
2014 return -ret;
2015 }
2016
2017 set_bit(chunk, block->transit_bitmap);
2018 acct_update_position(f, sge.length, false);
2019 rdma->total_writes++;
2020
2021 return 0;
2022}
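/*
 * Illustration (hypothetical helper, not called anywhere): a completion
 * handler could recover the pieces packed by qemu_rdma_make_wrid() above
 * using the RDMA_WRID_* masks and shifts.
 */
static inline void example_decode_wrid(uint64_t wr_id, uint64_t *type,
                                       uint64_t *block_index, uint64_t *chunk)
{
    *type = (wr_id & RDMA_WRID_TYPE_MASK) >> RDMA_WRID_TYPE_SHIFT;
    *block_index = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
    *chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
}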
2023
2024/*
2025 * Push out any unwritten RDMA operations.
2026 *
2027 * We support sending out multiple chunks at the same time.
2028 * Not all of them need to get signaled in the completion queue.
2029 */
2030static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
2031{
2032 int ret;
2033
2034 if (!rdma->current_length) {
2035 return 0;
2036 }
2037
2038 ret = qemu_rdma_write_one(f, rdma,
2039 rdma->current_index, rdma->current_addr, rdma->current_length);
2040
2041 if (ret < 0) {
2042 return ret;
2043 }
2044
2045 if (ret == 0) {
2046 rdma->nb_sent++;
733252de 2047 trace_qemu_rdma_write_flush(rdma->nb_sent);
2048 }
2049
2050 rdma->current_length = 0;
2051 rdma->current_addr = 0;
2052
2053 return 0;
2054}
2055
2056static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
2057 uint64_t offset, uint64_t len)
2058{
2059 RDMALocalBlock *block;
2060 uint8_t *host_addr;
2061 uint8_t *chunk_end;
2062
2063 if (rdma->current_index < 0) {
2064 return 0;
2065 }
2066
2067 if (rdma->current_chunk < 0) {
2068 return 0;
2069 }
2070
2071 block = &(rdma->local_ram_blocks.block[rdma->current_index]);
2072 host_addr = block->local_host_addr + (offset - block->offset);
2073 chunk_end = ram_chunk_end(block, rdma->current_chunk);
2074
2075 if (rdma->current_length == 0) {
2076 return 0;
2077 }
2078
2079 /*
2080 * Only merge into chunk sequentially.
2081 */
2082 if (offset != (rdma->current_addr + rdma->current_length)) {
2083 return 0;
2084 }
2085
2086 if (offset < block->offset) {
2087 return 0;
2088 }
2089
2090 if ((offset + len) > (block->offset + block->length)) {
2091 return 0;
2092 }
2093
2094 if ((host_addr + len) > chunk_end) {
2095 return 0;
2096 }
2097
2098 return 1;
2099}
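/*
 * Note: a buffer merges only when it is byte-contiguous with the pending
 * write (offset == current_addr + current_length), stays inside the same
 * RAMBlock, and does not spill past the current chunk.  Two adjacent 4 KB
 * pages in one chunk therefore coalesce into a single 8 KB RDMA write,
 * while a page crossing the chunk boundary forces a flush first.
 */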
2100
2101/*
2102 * We're not actually writing here, but doing three things:
2103 *
2104 * 1. Identify the chunk the buffer belongs to.
2105 * 2. If the chunk is full or the buffer doesn't belong to the current
2106 * chunk, then start a new chunk and flush() the old chunk.
2107 * 3. To keep the hardware busy, we also group chunks into batches
2108 * and only require that a batch gets acknowledged in the completion
 2109 * queue instead of each individual chunk.
2110 */
2111static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
2112 uint64_t block_offset, uint64_t offset,
2113 uint64_t len)
2114{
2115 uint64_t current_addr = block_offset + offset;
2116 uint64_t index = rdma->current_index;
2117 uint64_t chunk = rdma->current_chunk;
2118 int ret;
2119
2120 /* If we cannot merge it, we flush the current buffer first. */
2121 if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
2122 ret = qemu_rdma_write_flush(f, rdma);
2123 if (ret) {
2124 return ret;
2125 }
2126 rdma->current_length = 0;
2127 rdma->current_addr = current_addr;
2128
2129 ret = qemu_rdma_search_ram_block(rdma, block_offset,
2130 offset, len, &index, &chunk);
2131 if (ret) {
733252de 2132 error_report("ram block search failed");
2133 return ret;
2134 }
2135 rdma->current_index = index;
2136 rdma->current_chunk = chunk;
2137 }
2138
2139 /* merge it */
2140 rdma->current_length += len;
2141
2142 /* flush it if buffer is too large */
2143 if (rdma->current_length >= RDMA_MERGE_MAX) {
2144 return qemu_rdma_write_flush(f, rdma);
2145 }
2146
2147 return 0;
2148}
2149
2150static void qemu_rdma_cleanup(RDMAContext *rdma)
2151{
2152 struct rdma_cm_event *cm_event;
2153 int ret, idx;
2154
5a91337c 2155 if (rdma->cm_id && rdma->connected) {
2156 if (rdma->error_state) {
2157 RDMAControlHeader head = { .len = 0,
2158 .type = RDMA_CONTROL_ERROR,
2159 .repeat = 1,
2160 };
733252de 2161 error_report("Early error. Sending error notification to peer.");
2162 qemu_rdma_post_send_control(rdma, NULL, &head);
2163 }
2164
2165 ret = rdma_disconnect(rdma->cm_id);
2166 if (!ret) {
733252de 2167 trace_qemu_rdma_cleanup_waiting_for_disconnect();
2168 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2169 if (!ret) {
2170 rdma_ack_cm_event(cm_event);
2171 }
2172 }
733252de 2173 trace_qemu_rdma_cleanup_disconnect();
5a91337c 2174 rdma->connected = false;
2175 }
2176
2177 g_free(rdma->block);
2178 rdma->block = NULL;
2179
1f22364b 2180 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2181 if (rdma->wr_data[idx].control_mr) {
2182 rdma->total_registrations--;
2183 ibv_dereg_mr(rdma->wr_data[idx].control_mr);
2184 }
2185 rdma->wr_data[idx].control_mr = NULL;
2186 }
2187
2188 if (rdma->local_ram_blocks.block) {
2189 while (rdma->local_ram_blocks.nb_blocks) {
2190 __qemu_rdma_delete_block(rdma,
2191 rdma->local_ram_blocks.block->offset);
2192 }
2193 }
2194
2195 if (rdma->cq) {
2196 ibv_destroy_cq(rdma->cq);
2197 rdma->cq = NULL;
2198 }
2199 if (rdma->comp_channel) {
2200 ibv_destroy_comp_channel(rdma->comp_channel);
2201 rdma->comp_channel = NULL;
2202 }
2203 if (rdma->pd) {
2204 ibv_dealloc_pd(rdma->pd);
2205 rdma->pd = NULL;
2206 }
2207 if (rdma->listen_id) {
2208 rdma_destroy_id(rdma->listen_id);
2209 rdma->listen_id = NULL;
2210 }
2211 if (rdma->cm_id) {
2212 if (rdma->qp) {
2213 rdma_destroy_qp(rdma->cm_id);
2214 rdma->qp = NULL;
2215 }
2216 rdma_destroy_id(rdma->cm_id);
2217 rdma->cm_id = NULL;
2218 }
2219 if (rdma->channel) {
2220 rdma_destroy_event_channel(rdma->channel);
2221 rdma->channel = NULL;
2222 }
2223 g_free(rdma->host);
2224 rdma->host = NULL;
2225}
2226
2227
2228static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all)
2229{
2230 int ret, idx;
2231 Error *local_err = NULL, **temp = &local_err;
2232
2233 /*
2234 * Will be validated against destination's actual capabilities
2235 * after the connect() completes.
2236 */
2237 rdma->pin_all = pin_all;
2238
2239 ret = qemu_rdma_resolve_host(rdma, temp);
2240 if (ret) {
2241 goto err_rdma_source_init;
2242 }
2243
2244 ret = qemu_rdma_alloc_pd_cq(rdma);
2245 if (ret) {
2246 ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
2247 " limits may be too low. Please check $ ulimit -a # and "
66988941 2248 "search for 'ulimit -l' in the output");
2249 goto err_rdma_source_init;
2250 }
2251
2252 ret = qemu_rdma_alloc_qp(rdma);
2253 if (ret) {
66988941 2254 ERROR(temp, "rdma migration: error allocating qp!");
2255 goto err_rdma_source_init;
2256 }
2257
2258 ret = qemu_rdma_init_ram_blocks(rdma);
2259 if (ret) {
66988941 2260 ERROR(temp, "rdma migration: error initializing ram blocks!");
2261 goto err_rdma_source_init;
2262 }
2263
1f22364b 2264 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2265 ret = qemu_rdma_reg_control(rdma, idx);
2266 if (ret) {
66988941 2267 ERROR(temp, "rdma migration: error registering %d control!",
2268 idx);
2269 goto err_rdma_source_init;
2270 }
2271 }
2272
2273 return 0;
2274
2275err_rdma_source_init:
2276 error_propagate(errp, local_err);
2277 qemu_rdma_cleanup(rdma);
2278 return -1;
2279}
2280
2281static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
2282{
2283 RDMACapabilities cap = {
2284 .version = RDMA_CONTROL_VERSION_CURRENT,
2285 .flags = 0,
2286 };
2287 struct rdma_conn_param conn_param = { .initiator_depth = 2,
2288 .retry_count = 5,
2289 .private_data = &cap,
2290 .private_data_len = sizeof(cap),
2291 };
2292 struct rdma_cm_event *cm_event;
2293 int ret;
2294
2295 /*
 2296 * Only negotiate the capability with the destination if the user
2297 * on the source first requested the capability.
2298 */
2299 if (rdma->pin_all) {
733252de 2300 trace_qemu_rdma_connect_pin_all_requested();
2301 cap.flags |= RDMA_CAPABILITY_PIN_ALL;
2302 }
2303
2304 caps_to_network(&cap);
2305
2306 ret = rdma_connect(rdma->cm_id, &conn_param);
2307 if (ret) {
2308 perror("rdma_connect");
66988941 2309 ERROR(errp, "connecting to destination!");
2310 rdma_destroy_id(rdma->cm_id);
2311 rdma->cm_id = NULL;
2312 goto err_rdma_source_connect;
2313 }
2314
2315 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2316 if (ret) {
2317 perror("rdma_get_cm_event after rdma_connect");
66988941 2318 ERROR(errp, "connecting to destination!");
2319 rdma_ack_cm_event(cm_event);
2320 rdma_destroy_id(rdma->cm_id);
2321 rdma->cm_id = NULL;
2322 goto err_rdma_source_connect;
2323 }
2324
2325 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
2326 perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
66988941 2327 ERROR(errp, "connecting to destination!");
2328 rdma_ack_cm_event(cm_event);
2329 rdma_destroy_id(rdma->cm_id);
2330 rdma->cm_id = NULL;
2331 goto err_rdma_source_connect;
2332 }
5a91337c 2333 rdma->connected = true;
2334
2335 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
2336 network_to_caps(&cap);
2337
2338 /*
2339 * Verify that the *requested* capabilities are supported by the destination
2340 * and disable them otherwise.
2341 */
2342 if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
2343 ERROR(errp, "Server cannot support pinning all memory. "
66988941 2344 "Will register memory dynamically.");
2345 rdma->pin_all = false;
2346 }
2347
733252de 2348 trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);
2349
2350 rdma_ack_cm_event(cm_event);
2351
87772639 2352 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
2da776db 2353 if (ret) {
66988941 2354 ERROR(errp, "posting second control recv!");
2355 goto err_rdma_source_connect;
2356 }
2357
2358 rdma->control_ready_expected = 1;
2359 rdma->nb_sent = 0;
2360 return 0;
2361
2362err_rdma_source_connect:
2363 qemu_rdma_cleanup(rdma);
2364 return -1;
2365}
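/*
 * Note: capability negotiation rides in the CM private_data.  The source
 * offers RDMACapabilities via rdma_connect(); the destination masks
 * cap.flags against known_capabilities in qemu_rdma_accept() and echoes
 * the result back with rdma_accept(), which is why pin_all can be demoted
 * above when the destination does not support it.
 */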
2366
2367static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
2368{
2369 int ret = -EINVAL, idx;
2370 struct rdma_cm_id *listen_id;
2371 char ip[40] = "unknown";
7fc5b13f 2372 struct rdma_addrinfo *res;
b58c8552 2373 char port_str[16];
2da776db 2374
1f22364b 2375 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2376 rdma->wr_data[idx].control_len = 0;
2377 rdma->wr_data[idx].control_curr = NULL;
2378 }
2379
2380 if (rdma->host == NULL) {
66988941 2381 ERROR(errp, "RDMA host is not set!");
2382 rdma->error_state = -EINVAL;
2383 return -1;
2384 }
2385 /* create CM channel */
2386 rdma->channel = rdma_create_event_channel();
2387 if (!rdma->channel) {
66988941 2388 ERROR(errp, "could not create rdma event channel");
2389 rdma->error_state = -EINVAL;
2390 return -1;
2391 }
2392
2393 /* create CM id */
2394 ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
2395 if (ret) {
66988941 2396 ERROR(errp, "could not create cm_id!");
2397 goto err_dest_init_create_listen_id;
2398 }
2399
2400 snprintf(port_str, 16, "%d", rdma->port);
2401 port_str[15] = '\0';
2402
2403 if (rdma->host && strcmp("", rdma->host)) {
7fc5b13f 2404 struct rdma_addrinfo *e;
6470215b 2405
7fc5b13f 2406 ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
b58c8552 2407 if (ret < 0) {
7fc5b13f 2408 ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
2409 goto err_dest_init_bind_addr;
2410 }
b58c8552 2411
2412 for (e = res; e != NULL; e = e->ai_next) {
2413 inet_ntop(e->ai_family,
7fc5b13f 2414 &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
733252de 2415 trace_qemu_rdma_dest_init_trying(rdma->host, ip);
7fc5b13f 2416 ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
6470215b 2417 if (!ret) {
2418 if (e->ai_family == AF_INET6) {
2419 ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs);
2420 if (ret) {
2421 continue;
2422 }
2423 }
2424
2425 goto listen;
2426 }
2427 }
b58c8552 2428
 2429 ERROR(errp, "could not rdma_bind_addr!");
2430 goto err_dest_init_bind_addr;
2da776db 2431 } else {
66988941 2432 ERROR(errp, "migration host and port not specified!");
2433 ret = -EINVAL;
2434 goto err_dest_init_bind_addr;
2da776db 2435 }
6470215b 2436listen:
2437
2438 rdma->listen_id = listen_id;
2439 qemu_rdma_dump_gid("dest_init", listen_id);
2440 return 0;
2441
2442err_dest_init_bind_addr:
2443 rdma_destroy_id(listen_id);
2444err_dest_init_create_listen_id:
2445 rdma_destroy_event_channel(rdma->channel);
2446 rdma->channel = NULL;
2447 rdma->error_state = ret;
2448 return ret;
2449
2450}
2451
2452static void *qemu_rdma_data_init(const char *host_port, Error **errp)
2453{
2454 RDMAContext *rdma = NULL;
2455 InetSocketAddress *addr;
2456
2457 if (host_port) {
 2458 rdma = g_malloc0(sizeof(RDMAContext));
2460 rdma->current_index = -1;
2461 rdma->current_chunk = -1;
2462
2463 addr = inet_parse(host_port, NULL);
2464 if (addr != NULL) {
2465 rdma->port = atoi(addr->port);
2466 rdma->host = g_strdup(addr->host);
2467 } else {
2468 ERROR(errp, "bad RDMA migration address '%s'", host_port);
2469 g_free(rdma);
e325b49a 2470 rdma = NULL;
2da776db 2471 }
2472
2473 qapi_free_InetSocketAddress(addr);
2474 }
2475
2476 return rdma;
2477}
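/*
 * Example: qemu_rdma_data_init("192.168.1.10:4444", errp) returns a zeroed
 * RDMAContext with host = "192.168.1.10" and port = 4444; both the
 * incoming and outgoing entry points below parse their endpoint this way.
 */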
2478
2479/*
2480 * QEMUFile interface to the control channel.
2481 * SEND messages for control only.
971ae6ef 2482 * VM's ram is handled with regular RDMA messages.
2483 */
2484static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
2485 int64_t pos, int size)
2486{
2487 QEMUFileRDMA *r = opaque;
2488 QEMUFile *f = r->file;
2489 RDMAContext *rdma = r->rdma;
2490 size_t remaining = size;
2491 uint8_t * data = (void *) buf;
2492 int ret;
2493
2494 CHECK_ERROR_STATE();
2495
2496 /*
2497 * Push out any writes that
971ae6ef 2498 * we've queued up for the VM's ram.
2499 */
2500 ret = qemu_rdma_write_flush(f, rdma);
2501 if (ret < 0) {
2502 rdma->error_state = ret;
2503 return ret;
2504 }
2505
2506 while (remaining) {
2507 RDMAControlHeader head;
2508
2509 r->len = MIN(remaining, RDMA_SEND_INCREMENT);
2510 remaining -= r->len;
2511
2512 head.len = r->len;
2513 head.type = RDMA_CONTROL_QEMU_FILE;
2514
2515 ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
2516
2517 if (ret < 0) {
2518 rdma->error_state = ret;
2519 return ret;
2520 }
2521
2522 data += r->len;
2523 }
2524
2525 return size;
2526}
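/*
 * Example: a 100000-byte buffer of device state leaves here as four
 * RDMA_CONTROL_QEMU_FILE messages: three of RDMA_SEND_INCREMENT (32768)
 * bytes and a final one of 1696 bytes.
 */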
2527
2528static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
2529 int size, int idx)
2530{
2531 size_t len = 0;
2532
2533 if (rdma->wr_data[idx].control_len) {
733252de 2534 trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);
2535
2536 len = MIN(size, rdma->wr_data[idx].control_len);
2537 memcpy(buf, rdma->wr_data[idx].control_curr, len);
2538 rdma->wr_data[idx].control_curr += len;
2539 rdma->wr_data[idx].control_len -= len;
2540 }
2541
2542 return len;
2543}
2544
2545/*
2546 * QEMUFile interface to the control channel.
2547 * RDMA links don't use bytestreams, so we have to
2548 * return bytes to QEMUFile opportunistically.
2549 */
2550static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
2551 int64_t pos, int size)
2552{
2553 QEMUFileRDMA *r = opaque;
2554 RDMAContext *rdma = r->rdma;
2555 RDMAControlHeader head;
2556 int ret = 0;
2557
2558 CHECK_ERROR_STATE();
2559
2560 /*
2561 * First, we hold on to the last SEND message we
2562 * were given and dish out the bytes until we run
2563 * out of bytes.
2564 */
2565 r->len = qemu_rdma_fill(r->rdma, buf, size, 0);
2566 if (r->len) {
2567 return r->len;
2568 }
2569
2570 /*
2571 * Once we run out, we block and wait for another
2572 * SEND message to arrive.
2573 */
2574 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);
2575
2576 if (ret < 0) {
2577 rdma->error_state = ret;
2578 return ret;
2579 }
2580
2581 /*
2582 * SEND was received with new bytes, now try again.
2583 */
2584 return qemu_rdma_fill(r->rdma, buf, size, 0);
2585}
2586
2587/*
2588 * Block until all the outstanding chunks have been delivered by the hardware.
2589 */
2590static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
2591{
2592 int ret;
2593
2594 if (qemu_rdma_write_flush(f, rdma) < 0) {
2595 return -EIO;
2596 }
2597
2598 while (rdma->nb_sent) {
88571882 2599 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2da776db 2600 if (ret < 0) {
733252de 2601 error_report("rdma migration: complete polling error!");
2602 return -EIO;
2603 }
2604 }
2605
2606 qemu_rdma_unregister_waiting(rdma);
2607
2608 return 0;
2609}
2610
2611static int qemu_rdma_close(void *opaque)
2612{
733252de 2613 trace_qemu_rdma_close();
2614 QEMUFileRDMA *r = opaque;
2615 if (r->rdma) {
2616 qemu_rdma_cleanup(r->rdma);
2617 g_free(r->rdma);
2618 }
2619 g_free(r);
2620 return 0;
2621}
2622
2623/*
2624 * Parameters:
2625 * @offset == 0 :
2626 * This means that 'block_offset' is a full virtual address that does not
2627 * belong to a RAMBlock of the virtual machine and instead
2628 * represents a private malloc'd memory area that the caller wishes to
2629 * transfer.
2630 *
2631 * @offset != 0 :
2632 * Offset is an offset to be added to block_offset and used
2633 * to also lookup the corresponding RAMBlock.
2634 *
2635 * @size > 0 :
 2636 * Initiate a transfer of this size.
2637 *
2638 * @size == 0 :
2639 * A 'hint' or 'advice' that means that we wish to speculatively
2640 * and asynchronously unregister this memory. In this case, there is no
52f35022 2641 * guarantee that the unregister will actually happen, for example,
2da776db
MH
2642 * if the memory is being actively transmitted. Additionally, the memory
2643 * may be re-registered at any future time if a write within the same
2644 * chunk was requested again, even if you attempted to unregister it
2645 * here.
2646 *
2647 * @size < 0 : TODO, not yet supported
2648 * Unregister the memory NOW. This means that the caller does not
2649 * expect there to be any future RDMA transfers and we just want to clean
2650 * things up. This is used in case the upper layer owns the memory and
2651 * cannot wait for qemu_fclose() to occur.
2652 *
 2653 * @bytes_sent : User-specified pointer to indicate how many bytes were
2654 * sent. Usually, this will not be more than a few bytes of
2655 * the protocol because most transfers are sent asynchronously.
2656 */
2657static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
2658 ram_addr_t block_offset, ram_addr_t offset,
2659 size_t size, int *bytes_sent)
2660{
2661 QEMUFileRDMA *rfile = opaque;
2662 RDMAContext *rdma = rfile->rdma;
2663 int ret;
2664
2665 CHECK_ERROR_STATE();
2666
2667 qemu_fflush(f);
2668
2669 if (size > 0) {
2670 /*
2671 * Add this page to the current 'chunk'. If the chunk
 2672 * is full, or the page doesn't belong to the current chunk,
2673 * an actual RDMA write will occur and a new chunk will be formed.
2674 */
2675 ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
2676 if (ret < 0) {
733252de 2677 error_report("rdma migration: write error! %d", ret);
2678 goto err;
2679 }
2680
2681 /*
 2682 * We always return 1 byte because the RDMA
2683 * protocol is completely asynchronous. We do not yet know
2684 * whether an identified chunk is zero or not because we're
2685 * waiting for other pages to potentially be merged with
2686 * the current chunk. So, we have to call qemu_update_position()
2687 * later on when the actual write occurs.
2688 */
2689 if (bytes_sent) {
2690 *bytes_sent = 1;
2691 }
2692 } else {
2693 uint64_t index, chunk;
2694
2695 /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
2696 if (size < 0) {
2697 ret = qemu_rdma_drain_cq(f, rdma);
2698 if (ret < 0) {
2699 fprintf(stderr, "rdma: failed to synchronously drain"
2700 " completion queue before unregistration.\n");
2701 goto err;
2702 }
2703 }
2704 */
2705
2706 ret = qemu_rdma_search_ram_block(rdma, block_offset,
2707 offset, size, &index, &chunk);
2708
2709 if (ret) {
733252de 2710 error_report("ram block search failed");
2711 goto err;
2712 }
2713
2714 qemu_rdma_signal_unregister(rdma, index, chunk, 0);
2715
2716 /*
52f35022 2717 * TODO: Synchronous, guaranteed unregistration (should not occur during
2718 * fast-path). Otherwise, unregisters will process on the next call to
2719 * qemu_rdma_drain_cq()
2720 if (size < 0) {
2721 qemu_rdma_unregister_waiting(rdma);
2722 }
2723 */
2724 }
2725
2726 /*
2727 * Drain the Completion Queue if possible, but do not block,
2728 * just poll.
2729 *
2730 * If nothing to poll, the end of the iteration will do this
2731 * again to make sure we don't overflow the request queue.
2732 */
2733 while (1) {
2734 uint64_t wr_id, wr_id_in;
88571882 2735 ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
2da776db 2736 if (ret < 0) {
733252de 2737 error_report("rdma migration: polling error! %d", ret);
2738 goto err;
2739 }
2740
2741 wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
2742
2743 if (wr_id == RDMA_WRID_NONE) {
2744 break;
2745 }
2746 }
2747
2748 return RAM_SAVE_CONTROL_DELAYED;
2749err:
2750 rdma->error_state = ret;
2751 return ret;
2752}
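/*
 * Sketch (hypothetical caller, assuming the generic hooks of this era's
 * migration core): RAM migration reaches this function through the
 * QEMUFileOps save_page hook, roughly as
 *
 *     int bytes_sent = 0;
 *     size_t ret = f->ops->save_page(f, f->opaque, block->offset,
 *                                    offset, TARGET_PAGE_SIZE, &bytes_sent);
 *     if (ret == RAM_SAVE_CONTROL_DELAYED) {
 *         // accounting is deferred until the chunk is actually written
 *     }
 */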
2753
2754static int qemu_rdma_accept(RDMAContext *rdma)
2755{
2756 RDMACapabilities cap;
2757 struct rdma_conn_param conn_param = {
2758 .responder_resources = 2,
2759 .private_data = &cap,
2760 .private_data_len = sizeof(cap),
2761 };
2762 struct rdma_cm_event *cm_event;
2763 struct ibv_context *verbs;
2764 int ret = -EINVAL;
2765 int idx;
2766
2767 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2768 if (ret) {
2769 goto err_rdma_dest_wait;
2770 }
2771
2772 if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
2773 rdma_ack_cm_event(cm_event);
2774 goto err_rdma_dest_wait;
2775 }
2776
2777 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
2778
2779 network_to_caps(&cap);
2780
2781 if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
733252de 2782 error_report("Unknown source RDMA version: %d, bailing...",
2783 cap.version);
2784 rdma_ack_cm_event(cm_event);
2785 goto err_rdma_dest_wait;
2786 }
2787
2788 /*
2789 * Respond with only the capabilities this version of QEMU knows about.
2790 */
2791 cap.flags &= known_capabilities;
2792
2793 /*
2794 * Enable the ones that we do know about.
2795 * Add other checks here as new ones are introduced.
2796 */
2797 if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
2798 rdma->pin_all = true;
2799 }
2800
2801 rdma->cm_id = cm_event->id;
2802 verbs = cm_event->id->verbs;
2803
2804 rdma_ack_cm_event(cm_event);
2805
733252de 2806 trace_qemu_rdma_accept_pin_state(rdma->pin_all);
2807
2808 caps_to_network(&cap);
2809
733252de 2810 trace_qemu_rdma_accept_pin_verbsc(verbs);
2811
2812 if (!rdma->verbs) {
2813 rdma->verbs = verbs;
2814 } else if (rdma->verbs != verbs) {
2815 error_report("ibv context not matching %p, %p!", rdma->verbs,
2816 verbs);
2817 goto err_rdma_dest_wait;
2818 }
2819
2820 qemu_rdma_dump_id("dest_init", verbs);
2821
2822 ret = qemu_rdma_alloc_pd_cq(rdma);
2823 if (ret) {
733252de 2824 error_report("rdma migration: error allocating pd and cq!");
2825 goto err_rdma_dest_wait;
2826 }
2827
2828 ret = qemu_rdma_alloc_qp(rdma);
2829 if (ret) {
733252de 2830 error_report("rdma migration: error allocating qp!");
2831 goto err_rdma_dest_wait;
2832 }
2833
2834 ret = qemu_rdma_init_ram_blocks(rdma);
2835 if (ret) {
733252de 2836 error_report("rdma migration: error initializing ram blocks!");
2837 goto err_rdma_dest_wait;
2838 }
2839
1f22364b 2840 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2841 ret = qemu_rdma_reg_control(rdma, idx);
2842 if (ret) {
733252de 2843 error_report("rdma: error registering %d control", idx);
2844 goto err_rdma_dest_wait;
2845 }
2846 }
2847
2848 qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL);
2849
2850 ret = rdma_accept(rdma->cm_id, &conn_param);
2851 if (ret) {
733252de 2852 error_report("rdma_accept returns %d", ret);
2853 goto err_rdma_dest_wait;
2854 }
2855
2856 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2857 if (ret) {
733252de 2858 error_report("rdma_accept get_cm_event failed %d", ret);
2859 goto err_rdma_dest_wait;
2860 }
2861
2862 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
733252de 2863 error_report("rdma_accept: connection event is not ESTABLISHED");
2864 rdma_ack_cm_event(cm_event);
2865 goto err_rdma_dest_wait;
2866 }
2867
2868 rdma_ack_cm_event(cm_event);
5a91337c 2869 rdma->connected = true;
2da776db 2870
87772639 2871 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
2da776db 2872 if (ret) {
733252de 2873 error_report("rdma migration: error posting second control recv");
2874 goto err_rdma_dest_wait;
2875 }
2876
2877 qemu_rdma_dump_gid("dest_connect", rdma->cm_id);
2878
2879 return 0;
2880
2881err_rdma_dest_wait:
2882 rdma->error_state = ret;
2883 qemu_rdma_cleanup(rdma);
2884 return ret;
2885}
2886
2887/*
2888 * During each iteration of the migration, we listen for instructions
 2889 * from the source VM to perform dynamic page registrations before it
2890 * can perform RDMA operations.
2891 *
2892 * We respond with the 'rkey'.
2893 *
2894 * Keep doing this until the source tells us to stop.
2895 */
2896static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
2897 uint64_t flags)
2898{
2899 RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
2900 .type = RDMA_CONTROL_REGISTER_RESULT,
2901 .repeat = 0,
2902 };
2903 RDMAControlHeader unreg_resp = { .len = 0,
2904 .type = RDMA_CONTROL_UNREGISTER_FINISHED,
2905 .repeat = 0,
2906 };
2907 RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
2908 .repeat = 1 };
2909 QEMUFileRDMA *rfile = opaque;
2910 RDMAContext *rdma = rfile->rdma;
2911 RDMALocalBlocks *local = &rdma->local_ram_blocks;
2912 RDMAControlHeader head;
2913 RDMARegister *reg, *registers;
2914 RDMACompress *comp;
2915 RDMARegisterResult *reg_result;
2916 static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
2917 RDMALocalBlock *block;
2918 void *host_addr;
2919 int ret = 0;
2920 int idx = 0;
2921 int count = 0;
2922 int i = 0;
2923
2924 CHECK_ERROR_STATE();
2925
2926 do {
733252de 2927 trace_qemu_rdma_registration_handle_wait(flags);
2928
2929 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
2930
2931 if (ret < 0) {
2932 break;
2933 }
2934
2935 if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
 2936 error_report("rdma: Too many requests in this message (%d). "
 2937 "Bailing.", head.repeat);
2938 ret = -EIO;
2939 break;
2940 }
2941
2942 switch (head.type) {
2943 case RDMA_CONTROL_COMPRESS:
2944 comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
2945 network_to_compress(comp);
2946
2947 trace_qemu_rdma_registration_handle_compress(comp->length,
2948 comp->block_idx,
2949 comp->offset);
2950 block = &(rdma->local_ram_blocks.block[comp->block_idx]);
2951
2952 host_addr = block->local_host_addr +
2953 (comp->offset - block->offset);
2954
2955 ram_handle_compressed(host_addr, comp->value, comp->length);
2956 break;
2957
2958 case RDMA_CONTROL_REGISTER_FINISHED:
733252de 2959 trace_qemu_rdma_registration_handle_finished();
2960 goto out;
2961
2962 case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
733252de 2963 trace_qemu_rdma_registration_handle_ram_blocks();
2964
2965 if (rdma->pin_all) {
2966 ret = qemu_rdma_reg_whole_ram_blocks(rdma);
2967 if (ret) {
2968 error_report("rdma migration: error dest "
2969 "registering ram blocks");
2970 goto out;
2971 }
2972 }
2973
2974 /*
2975 * Dest uses this to prepare to transmit the RAMBlock descriptions
2976 * to the source VM after connection setup.
2977 * Both sides use the "remote" structure to communicate and update
2978 * their "local" descriptions with what was sent.
2979 */
2980 for (i = 0; i < local->nb_blocks; i++) {
2981 rdma->block[i].remote_host_addr =
2982 (uint64_t)(local->block[i].local_host_addr);
2983
2984 if (rdma->pin_all) {
2985 rdma->block[i].remote_rkey = local->block[i].mr->rkey;
2986 }
2987
2988 rdma->block[i].offset = local->block[i].offset;
2989 rdma->block[i].length = local->block[i].length;
2990
2991 remote_block_to_network(&rdma->block[i]);
2992 }
2993
2994 blocks.len = rdma->local_ram_blocks.nb_blocks
2995 * sizeof(RDMARemoteBlock);
2996
2997
2998 ret = qemu_rdma_post_send_control(rdma,
2999 (uint8_t *) rdma->block, &blocks);
3000
3001 if (ret < 0) {
733252de 3002 error_report("rdma migration: error sending remote info");
2da776db
MH
3003 goto out;
3004 }
3005
3006 break;
3007 case RDMA_CONTROL_REGISTER_REQUEST:
733252de 3008 trace_qemu_rdma_registration_handle_register(head.repeat);
3009
3010 reg_resp.repeat = head.repeat;
3011 registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
3012
3013 for (count = 0; count < head.repeat; count++) {
3014 uint64_t chunk;
3015 uint8_t *chunk_start, *chunk_end;
3016
3017 reg = &registers[count];
3018 network_to_register(reg);
3019
3020 reg_result = &results[count];
3021
733252de 3022 trace_qemu_rdma_registration_handle_register_loop(count,
3023 reg->current_index, reg->key.current_addr, reg->chunks);
3024
3025 block = &(rdma->local_ram_blocks.block[reg->current_index]);
3026 if (block->is_ram_block) {
3027 host_addr = (block->local_host_addr +
3028 (reg->key.current_addr - block->offset));
3029 chunk = ram_chunk_index(block->local_host_addr,
3030 (uint8_t *) host_addr);
3031 } else {
3032 chunk = reg->key.chunk;
3033 host_addr = block->local_host_addr +
3034 (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
3035 }
3036 chunk_start = ram_chunk_start(block, chunk);
3037 chunk_end = ram_chunk_end(block, chunk + reg->chunks);
3038 if (qemu_rdma_register_and_get_keys(rdma, block,
3039 (uint8_t *)host_addr, NULL, &reg_result->rkey,
3040 chunk, chunk_start, chunk_end)) {
733252de 3041 error_report("cannot get rkey");
3042 ret = -EINVAL;
3043 goto out;
3044 }
3045
3046 reg_result->host_addr = (uint64_t) block->local_host_addr;
3047
3048 trace_qemu_rdma_registration_handle_register_rkey(
3049 reg_result->rkey);
3050
3051 result_to_network(reg_result);
3052 }
3053
3054 ret = qemu_rdma_post_send_control(rdma,
3055 (uint8_t *) results, &reg_resp);
3056
3057 if (ret < 0) {
733252de 3058 error_report("Failed to send control buffer");
3059 goto out;
3060 }
3061 break;
3062 case RDMA_CONTROL_UNREGISTER_REQUEST:
733252de 3063 trace_qemu_rdma_registration_handle_unregister(head.repeat);
3064 unreg_resp.repeat = head.repeat;
3065 registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
3066
3067 for (count = 0; count < head.repeat; count++) {
3068 reg = &registers[count];
3069 network_to_register(reg);
3070
3071 trace_qemu_rdma_registration_handle_unregister_loop(count,
3072 reg->current_index, reg->key.chunk);
3073
3074 block = &(rdma->local_ram_blocks.block[reg->current_index]);
3075
3076 ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
3077 block->pmr[reg->key.chunk] = NULL;
3078
3079 if (ret != 0) {
3080 perror("rdma unregistration chunk failed");
3081 ret = -ret;
3082 goto out;
3083 }
3084
3085 rdma->total_registrations--;
3086
3087 trace_qemu_rdma_registration_handle_unregister_success(
3088 reg->key.chunk);
3089 }
3090
3091 ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);
3092
3093 if (ret < 0) {
733252de 3094 error_report("Failed to send control buffer");
3095 goto out;
3096 }
3097 break;
3098 case RDMA_CONTROL_REGISTER_RESULT:
733252de 3099 error_report("Invalid RESULT message at dest.");
3100 ret = -EIO;
3101 goto out;
3102 default:
733252de 3103 error_report("Unknown control message %s", control_desc[head.type]);
3104 ret = -EIO;
3105 goto out;
3106 }
3107 } while (1);
3108out:
3109 if (ret < 0) {
3110 rdma->error_state = ret;
3111 }
3112 return ret;
3113}
3114
3115static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
3116 uint64_t flags)
3117{
3118 QEMUFileRDMA *rfile = opaque;
3119 RDMAContext *rdma = rfile->rdma;
3120
3121 CHECK_ERROR_STATE();
3122
733252de 3123 trace_qemu_rdma_registration_start(flags);
3124 qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
3125 qemu_fflush(f);
3126
3127 return 0;
3128}
3129
3130/*
3131 * Inform dest that dynamic registrations are done for now.
3132 * First, flush writes, if any.
3133 */
3134static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
3135 uint64_t flags)
3136{
3137 Error *local_err = NULL, **errp = &local_err;
3138 QEMUFileRDMA *rfile = opaque;
3139 RDMAContext *rdma = rfile->rdma;
3140 RDMAControlHeader head = { .len = 0, .repeat = 1 };
3141 int ret = 0;
3142
3143 CHECK_ERROR_STATE();
3144
3145 qemu_fflush(f);
3146 ret = qemu_rdma_drain_cq(f, rdma);
3147
3148 if (ret < 0) {
3149 goto err;
3150 }
3151
3152 if (flags == RAM_CONTROL_SETUP) {
3153 RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
3154 RDMALocalBlocks *local = &rdma->local_ram_blocks;
3155 int reg_result_idx, i, j, nb_remote_blocks;
3156
3157 head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
733252de 3158 trace_qemu_rdma_registration_stop_ram();
3159
3160 /*
3161 * Make sure that we parallelize the pinning on both sides.
3162 * For very large guests, doing this serially takes a really
3163 * long time, so we have to 'interleave' the pinning locally
3164 * with the control messages by performing the pinning on this
3165 * side before we receive the control response from the other
3166 * side that the pinning has completed.
3167 */
3168 ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
3169 &reg_result_idx, rdma->pin_all ?
3170 qemu_rdma_reg_whole_ram_blocks : NULL);
3171 if (ret < 0) {
66988941 3172 ERROR(errp, "receiving remote info!");
3173 return ret;
3174 }
3175
3176 nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock);
3177
3178 /*
3179 * The protocol uses two different sets of rkeys (mutually exclusive):
3180 * 1. One key to represent the virtual address of the entire ram block.
3181 * (dynamic chunk registration disabled - pin everything with one rkey.)
3182 * 2. One to represent individual chunks within a ram block.
3183 * (dynamic chunk registration enabled - pin individual chunks.)
3184 *
3185 * Once the capability is successfully negotiated, the destination transmits
3186 * the keys to use (or sends them later) including the virtual addresses
 3187 * and then propagates the remote ram block descriptions to its local copy.
3188 */
3189
3190 if (local->nb_blocks != nb_remote_blocks) {
3191 ERROR(errp, "ram blocks mismatch #1! "
3192 "Your QEMU command line parameters are probably "
66988941 3193 "not identical on both the source and destination.");
3194 return -EINVAL;
3195 }
3196
3197 qemu_rdma_move_header(rdma, reg_result_idx, &resp);
3198 memcpy(rdma->block,
3199 rdma->wr_data[reg_result_idx].control_curr, resp.len);
3200 for (i = 0; i < nb_remote_blocks; i++) {
3201 network_to_remote_block(&rdma->block[i]);
3202
3203 /* search local ram blocks */
3204 for (j = 0; j < local->nb_blocks; j++) {
3205 if (rdma->block[i].offset != local->block[j].offset) {
3206 continue;
3207 }
3208
3209 if (rdma->block[i].length != local->block[j].length) {
3210 ERROR(errp, "ram blocks mismatch #2! "
3211 "Your QEMU command line parameters are probably "
66988941 3212 "not identical on both the source and destination.");
3213 return -EINVAL;
3214 }
3215 local->block[j].remote_host_addr =
3216 rdma->block[i].remote_host_addr;
3217 local->block[j].remote_rkey = rdma->block[i].remote_rkey;
3218 break;
3219 }
3220
3221 if (j >= local->nb_blocks) {
3222 ERROR(errp, "ram blocks mismatch #3! "
3223 "Your QEMU command line parameters are probably "
66988941 3224 "not identical on both the source and destination.");
3225 return -EINVAL;
3226 }
3227 }
3228 }
3229
733252de 3230 trace_qemu_rdma_registration_stop(flags);
3231
3232 head.type = RDMA_CONTROL_REGISTER_FINISHED;
3233 ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
3234
3235 if (ret < 0) {
3236 goto err;
3237 }
3238
3239 return 0;
3240err:
3241 rdma->error_state = ret;
3242 return ret;
3243}
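/*
 * Note: the RDMA_CONTROL_REGISTER_FINISHED message sent above is what
 * breaks the destination out of qemu_rdma_registration_handle()'s receive
 * loop, ending the registration phase for this iteration.
 */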
3244
3245static int qemu_rdma_get_fd(void *opaque)
3246{
3247 QEMUFileRDMA *rfile = opaque;
3248 RDMAContext *rdma = rfile->rdma;
3249
3250 return rdma->comp_channel->fd;
3251}
3252
3253const QEMUFileOps rdma_read_ops = {
3254 .get_buffer = qemu_rdma_get_buffer,
3255 .get_fd = qemu_rdma_get_fd,
3256 .close = qemu_rdma_close,
3257 .hook_ram_load = qemu_rdma_registration_handle,
3258};
3259
3260const QEMUFileOps rdma_write_ops = {
3261 .put_buffer = qemu_rdma_put_buffer,
3262 .close = qemu_rdma_close,
3263 .before_ram_iterate = qemu_rdma_registration_start,
3264 .after_ram_iterate = qemu_rdma_registration_stop,
3265 .save_page = qemu_rdma_save_page,
3266};
3267
3268static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
3269{
3270 QEMUFileRDMA *r = g_malloc0(sizeof(QEMUFileRDMA));
3271
3272 if (qemu_file_mode_is_not_valid(mode)) {
3273 return NULL;
3274 }
3275
3276 r->rdma = rdma;
3277
3278 if (mode[0] == 'w') {
3279 r->file = qemu_fopen_ops(r, &rdma_write_ops);
3280 } else {
3281 r->file = qemu_fopen_ops(r, &rdma_read_ops);
3282 }
3283
3284 return r->file;
3285}
3286
3287static void rdma_accept_incoming_migration(void *opaque)
3288{
3289 RDMAContext *rdma = opaque;
3290 int ret;
3291 QEMUFile *f;
3292 Error *local_err = NULL, **errp = &local_err;
3293
733252de 3294 trace_qemu_dma_accept_incoming_migration();
3295 ret = qemu_rdma_accept(rdma);
3296
3297 if (ret) {
66988941 3298 ERROR(errp, "RDMA Migration initialization failed!");
3299 return;
3300 }
3301
733252de 3302 trace_qemu_dma_accept_incoming_migration_accepted();
3303
3304 f = qemu_fopen_rdma(rdma, "rb");
3305 if (f == NULL) {
66988941 3306 ERROR(errp, "could not qemu_fopen_rdma!");
3307 qemu_rdma_cleanup(rdma);
3308 return;
3309 }
3310
3311 rdma->migration_started_on_destination = 1;
3312 process_incoming_migration(f);
3313}
3314
3315void rdma_start_incoming_migration(const char *host_port, Error **errp)
3316{
3317 int ret;
3318 RDMAContext *rdma;
3319 Error *local_err = NULL;
3320
733252de 3321 trace_rdma_start_incoming_migration();
3322 rdma = qemu_rdma_data_init(host_port, &local_err);
3323
3324 if (rdma == NULL) {
3325 goto err;
3326 }
3327
3328 ret = qemu_rdma_dest_init(rdma, &local_err);
3329
3330 if (ret) {
3331 goto err;
3332 }
3333
733252de 3334 trace_rdma_start_incoming_migration_after_dest_init();
3335
3336 ret = rdma_listen(rdma->listen_id, 5);
3337
3338 if (ret) {
66988941 3339 ERROR(errp, "listening on socket!");
3340 goto err;
3341 }
3342
733252de 3343 trace_rdma_start_incoming_migration_after_rdma_listen();
3344
3345 qemu_set_fd_handler2(rdma->channel->fd, NULL,
3346 rdma_accept_incoming_migration, NULL,
3347 (void *)(intptr_t) rdma);
3348 return;
3349err:
3350 error_propagate(errp, local_err);
3351 g_free(rdma);
3352}
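/*
 * Note: this entry point is reached with "-incoming rdma:host:port" on the
 * destination; the outgoing counterpart below is driven by the
 * "migrate rdma:host:port" monitor command (spellings assumed from the
 * migration core of this QEMU version).
 */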
3353
3354void rdma_start_outgoing_migration(void *opaque,
3355 const char *host_port, Error **errp)
3356{
3357 MigrationState *s = opaque;
3358 Error *local_err = NULL, **temp = &local_err;
3359 RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
3360 int ret = 0;
3361
3362 if (rdma == NULL) {
66988941 3363 ERROR(temp, "Failed to initialize RDMA data structures!");
3364 goto err;
3365 }
3366
3367 ret = qemu_rdma_source_init(rdma, &local_err,
41310c68 3368 s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
3369
3370 if (ret) {
3371 goto err;
3372 }
3373
733252de 3374 trace_rdma_start_outgoing_migration_after_rdma_source_init();
3375 ret = qemu_rdma_connect(rdma, &local_err);
3376
3377 if (ret) {
3378 goto err;
3379 }
3380
733252de 3381 trace_rdma_start_outgoing_migration_after_rdma_connect();
3382
3383 s->file = qemu_fopen_rdma(rdma, "wb");
3384 migrate_fd_connect(s);
3385 return;
3386err:
3387 error_propagate(errp, local_err);
3388 g_free(rdma);
3389 migrate_fd_error(s);
3390}