/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "exec/cpu-common.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "block/coroutine.h"
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <string.h>
#include <rdma/rdma_cma.h>
#include "trace.h"

/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)

#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */

/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size of an InfiniBand SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;

#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                error_report("RDMA is in an error state waiting for" \
                             " migration to abort!"); \
                rdma->error_reported = 1; \
            } \
            return rdma->error_state; \
        } \
    } while (0);

/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL

#define RDMA_WRID_TYPE_MASK \
    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)

#define RDMA_WRID_BLOCK_MASK \
    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))

#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
112/*
113 * RDMA migration protocol:
114 * 1. RDMA Writes (data messages, i.e. RAM)
115 * 2. IB Send/Recv (control channel messages)
116 */
117enum {
118 RDMA_WRID_NONE = 0,
119 RDMA_WRID_RDMA_WRITE = 1,
120 RDMA_WRID_SEND_CONTROL = 2000,
121 RDMA_WRID_RECV_CONTROL = 4000,
122};
123
2ae31aea 124static const char *wrid_desc[] = {
2da776db
MH
125 [RDMA_WRID_NONE] = "NONE",
126 [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
127 [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
128 [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
129};
130
131/*
132 * Work request IDs for IB SEND messages only (not RDMA writes).
133 * This is used by the migration protocol to transmit
134 * control messages (such as device state and registration commands)
135 *
136 * We could use more WRs, but we have enough for now.
137 */
138enum {
139 RDMA_WRID_READY = 0,
140 RDMA_WRID_DATA,
141 RDMA_WRID_CONTROL,
142 RDMA_WRID_MAX,
143};
144
145/*
146 * SEND/RECV IB Control Messages.
147 */
148enum {
149 RDMA_CONTROL_NONE = 0,
150 RDMA_CONTROL_ERROR,
151 RDMA_CONTROL_READY, /* ready to receive */
152 RDMA_CONTROL_QEMU_FILE, /* QEMUFile-transmitted bytes */
153 RDMA_CONTROL_RAM_BLOCKS_REQUEST, /* RAMBlock synchronization */
154 RDMA_CONTROL_RAM_BLOCKS_RESULT, /* RAMBlock synchronization */
155 RDMA_CONTROL_COMPRESS, /* page contains repeat values */
156 RDMA_CONTROL_REGISTER_REQUEST, /* dynamic page registration */
157 RDMA_CONTROL_REGISTER_RESULT, /* key to use after registration */
158 RDMA_CONTROL_REGISTER_FINISHED, /* current iteration finished */
159 RDMA_CONTROL_UNREGISTER_REQUEST, /* dynamic UN-registration */
160 RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
161};
162
2ae31aea 163static const char *control_desc[] = {
2da776db
MH
164 [RDMA_CONTROL_NONE] = "NONE",
165 [RDMA_CONTROL_ERROR] = "ERROR",
166 [RDMA_CONTROL_READY] = "READY",
167 [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
168 [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
169 [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
170 [RDMA_CONTROL_COMPRESS] = "COMPRESS",
171 [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
172 [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
173 [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
174 [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
175 [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
176};

/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;

/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
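
/*
 * Illustrative sketch (an assumption added for clarity, not part of the
 * negotiation code below): after network_to_caps(), a peer would accept a
 * connection only if the version matches and no unknown capability flags
 * are set.
 */
static inline bool rdma_caps_acceptable_example(const RDMACapabilities *cap)
{
    return cap->version == RDMA_CONTROL_VERSION_CURRENT &&
           (cap->flags & ~known_capabilities) == 0;
}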

/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    uint8_t  *local_host_addr;   /* local virtual address */
    uint64_t remote_host_addr;   /* remote virtual address */
    uint64_t offset;
    uint64_t length;
    struct   ibv_mr **pmr;       /* MRs for chunk-level registration */
    struct   ibv_mr *mr;         /* MR for non-chunk-level registration */
    uint32_t *remote_keys;       /* rkeys for chunk-level registration */
    uint32_t remote_rkey;        /* rkeys for non-chunk-level registration */
    int      index;              /* which block are we */
    bool     is_ram_block;
    int      nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;

/*
 * Also represents a RAMBlock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMARemoteBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMARemoteBlock;

static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
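
/*
 * Worked example (illustrative): ntohll(htonll(x)) == x for any x, and on
 * a little-endian host htonll(0x0102030405060708ULL) stores the bytes
 * 01 02 03 04 05 06 07 08 in memory, i.e. network (big-endian) order.
 */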

static void remote_block_to_network(RDMARemoteBlock *rb)
{
    rb->remote_host_addr = htonll(rb->remote_host_addr);
    rb->offset = htonll(rb->offset);
    rb->length = htonll(rb->length);
    rb->remote_rkey = htonl(rb->remote_rkey);
}

static void network_to_remote_block(RDMARemoteBlock *rb)
{
    rb->remote_host_addr = ntohll(rb->remote_host_addr);
    rb->offset = ntohll(rb->offset);
    rb->length = ntohll(rb->length);
    rb->remote_rkey = ntohl(rb->remote_rkey);
}

/*
 * Virtual address of the above structures used for transmitting
 * the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool init;                  /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;

/*
 * Main data structure for RDMA state.
 * While there is only one copy of this structure being allocated right now,
 * this is the place where one would start if one wanted to consider
 * having more than one RDMA connection open at the same time.
 */
typedef struct RDMAContext {
    char *host;
    int port;

    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];

    /*
     * This is used by *_exchange_send() to figure out whether or not
     * the initial "READY" message has already been received.
     * This is because other functions may potentially poll() and detect
     * the READY message before send() does, in which case we need to
     * know if it completed.
     */
    int control_ready_expected;

    /* number of outstanding writes */
    int nb_sent;

    /* store info about current buffer so that we can
       merge it with future sends */
    uint64_t current_addr;
    uint64_t current_length;
    /* index of ram block the current buffer belongs to */
    int current_index;
    /* index of the chunk in the current ram block */
    int current_chunk;

    bool pin_all;

    /*
     * infiniband-specific variables for opening the device
     * and maintaining connection state and so forth.
     *
     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
     * cm_id->verbs, cm_id->channel, and cm_id->qp.
     */
    struct rdma_cm_id *cm_id;               /* connection manager ID */
    struct rdma_cm_id *listen_id;
    bool connected;

    struct ibv_context          *verbs;
    struct rdma_event_channel   *channel;
    struct ibv_qp *qp;                      /* queue pair */
    struct ibv_comp_channel *comp_channel;  /* completion channel */
    struct ibv_pd *pd;                      /* protection domain */
    struct ibv_cq *cq;                      /* completion queue */

    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    int error_reported;

    /*
     * Description of ram blocks used throughout the code.
     */
    RDMALocalBlocks local_ram_blocks;
    RDMARemoteBlock *block;

    /*
     * Migration on the *destination* has started.
     * Once it has, use the coroutine yield function.
     * The source runs in a thread, so we don't care.
     */
    int migration_started_on_destination;

    int total_registrations;
    int total_writes;

    int unregister_current, unregister_next;
    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];

    GHashTable *blockmap;
} RDMAContext;

/*
 * Interface to the rest of the migration call stack.
 */
typedef struct QEMUFileRDMA {
    RDMAContext *rdma;
    size_t len;
    void *file;
} QEMUFileRDMA;

/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;

static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
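
/*
 * Wire-format note (illustrative, derived from the QEMU_PACKED layout
 * above): the header occupies 16 bytes on the wire: len at byte offset 0,
 * type at 4, repeat at 8, and padding at 12, each a big-endian uint32_t
 * once control_to_network() has run.
 */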

/*
 * Register a single Chunk.
 * Information sent by the source VM to inform the dest
 * to register a single chunk of memory before we can perform
 * the actual RDMA operation.
 */
typedef struct QEMU_PACKED {
    union QEMU_PACKED {
        uint64_t current_addr;  /* offset into the ramblock of the chunk */
        uint64_t chunk;         /* chunk to lookup if unregistering */
    } key;
    uint32_t current_index;     /* which ramblock the chunk belongs to */
    uint32_t padding;
    uint64_t chunks;            /* how many sequential chunks to register */
} RDMARegister;

static void register_to_network(RDMARegister *reg)
{
    reg->key.current_addr = htonll(reg->key.current_addr);
    reg->current_index = htonl(reg->current_index);
    reg->chunks = htonll(reg->chunks);
}

static void network_to_register(RDMARegister *reg)
{
    reg->key.current_addr = ntohll(reg->key.current_addr);
    reg->current_index = ntohl(reg->current_index);
    reg->chunks = ntohll(reg->chunks);
}

typedef struct QEMU_PACKED {
    uint32_t value;     /* if zero, we will madvise() */
    uint32_t block_idx; /* which ram block index */
    uint64_t offset;    /* where in the remote ramblock this chunk */
    uint64_t length;    /* length of the chunk */
} RDMACompress;

static void compress_to_network(RDMACompress *comp)
{
    comp->value = htonl(comp->value);
    comp->block_idx = htonl(comp->block_idx);
    comp->offset = htonll(comp->offset);
    comp->length = htonll(comp->length);
}

static void network_to_compress(RDMACompress *comp)
{
    comp->value = ntohl(comp->value);
    comp->block_idx = ntohl(comp->block_idx);
    comp->offset = ntohll(comp->offset);
    comp->length = ntohll(comp->length);
}

/*
 * The result of the dest's memory registration produces an "rkey"
 * which the source VM must reference in order to perform
 * the RDMA operation.
 */
typedef struct QEMU_PACKED {
    uint32_t rkey;
    uint32_t padding;
    uint64_t host_addr;
} RDMARegisterResult;

static void result_to_network(RDMARegisterResult *result)
{
    result->rkey = htonl(result->rkey);
    result->host_addr = htonll(result->host_addr);
}

static void network_to_result(RDMARegisterResult *result)
{
    result->rkey = ntohl(result->rkey);
    result->host_addr = ntohll(result->host_addr);
}

const char *print_wrid(int wrid);
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma));

static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}

static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *)(((uintptr_t) rdma_ram_block->local_host_addr)
                       + (i << RDMA_REG_CHUNK_SHIFT));
}

static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}
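
/*
 * Worked example (illustrative assumption): with RDMA_REG_CHUNK_SHIFT of
 * 20 the chunk size is 1 MB, so a 2.5 MB RAMBlock spans chunks 0..2:
 * ram_chunk_index(start, start + block->length) == 2, rdma_add_block()
 * below therefore sets nb_chunks = 3, and ram_chunk_end(block, 2) clamps
 * the final partial chunk to local_host_addr + length.
 */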

static int rdma_add_block(RDMAContext *rdma, void *host_addr,
                          ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    RDMALocalBlock *old = local->block;

    assert(block == NULL);

    local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));

    if (local->nb_blocks) {
        int x;

        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
            g_hash_table_insert(rdma->blockmap, (void *)old[x].offset,
                                &local->block[x]);
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));

    block->is_ram_block = local->init ? false : true;

    g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);

    trace_rdma_add_block(local->nb_blocks, (uint64_t) block->local_host_addr,
                         block->offset, block->length,
                         (uint64_t) (block->local_host_addr + block->length),
                         BITS_TO_LONGS(block->nb_chunks) *
                             sizeof(unsigned long) * 8,
                         block->nb_chunks);

    local->nb_blocks++;

    return 0;
}

/*
 * Memory regions need to be registered with the device and queue pairs set
 * up in advance before the migration starts. This tells us where the RAM
 * blocks are so that we can register them individually.
 */
static void qemu_rdma_init_one_block(void *host_addr,
    ram_addr_t block_offset, ram_addr_t length, void *opaque)
{
    rdma_add_block(opaque, host_addr, block_offset, length);
}

/*
 * Identify the RAMBlocks and their quantity. They will be used as
 * references to identify chunk boundaries inside each RAMBlock and also be
 * referenced during dynamic page registration.
 */
static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    assert(rdma->blockmap == NULL);
    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
    memset(local, 0, sizeof *local);
    qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
    trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
    rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
                                        rdma->local_ram_blocks.nb_blocks);
    local->init = true;
    return 0;
}

static int rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    RDMALocalBlock *old = local->block;
    int x;

    assert(block);

    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    for (x = 0; x < local->nb_blocks; x++) {
        g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
    }

    if (local->nb_blocks > 1) {

        local->block = g_malloc0(sizeof(RDMALocalBlock) *
                                 (local->nb_blocks - 1));

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                   sizeof(RDMALocalBlock) *
                       (local->nb_blocks - (block->index + 1)));
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    trace_rdma_delete_block(local->nb_blocks,
                            (uint64_t)block->local_host_addr,
                            block->offset, block->length,
                            (uint64_t)(block->local_host_addr + block->length),
                            BITS_TO_LONGS(block->nb_chunks) *
                                sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset,
                                &local->block[x]);
        }
    }

    return 0;
}

/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        error_report("Failed to query port information");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband"
                : ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}

/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    trace_qemu_rdma_dump_gid(who, sgid, dgid);
}

/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If the user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 *  If the source VM connects with an IPv4 address without knowing that the
 *  destination has bound to '[::]' the migration will unconditionally fail
 *  unless the management software is explicitly listening on the IPv4
 *  address while using a RoCE-based device.
 *
 *  If the source VM connects with an IPv6 address, then we're OK because we
 *  can throw an error on the source (and similarly on the destination).
 *
 *  But in mixed environments, this will be broken for a while until it is
 *  fixed inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
{
    struct ibv_port_attr port_attr;

    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there are any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);

        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that something other than
     * '[::]' was used by the management software for binding. In which
     * case we can actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}

/*
 * Figure out which RDMA device corresponds to the requested IP hostname.
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}

/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        error_report("failed to allocate completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        error_report("failed to allocate completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;

}

/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->cq;
    attr.recv_cq = rdma->cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}

static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                    local->block[i].local_host_addr,
                    local->block[i].length,
                    IBV_ACCESS_LOCAL_WRITE |
                    IBV_ACCESS_REMOTE_WRITE
                    );
        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!\n");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        rdma->total_registrations--;
    }

    return -1;

}

/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block that the page belongs to.
 *
 * This search cannot fail or the migration will fail.
 */
static int qemu_rdma_search_ram_block(RDMAContext *rdma,
                                      uint64_t block_offset,
                                      uint64_t offset,
                                      uint64_t length,
                                      uint64_t *block_index,
                                      uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));

    return 0;
}

/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
        RDMALocalBlock *block, uint8_t *host_addr,
        uint32_t *lkey, uint32_t *rkey, int chunk,
        uint8_t *chunk_start, uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;

        trace_qemu_rdma_register_and_get_keys(len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
                chunk_start, len,
                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE) : 0));

        if (!block->pmr[chunk]) {
            perror("Failed to register chunk!");
            fprintf(stderr, "Chunk details: block: %d chunk index %d"
                            " start %" PRIu64 " end %" PRIu64 " host %" PRIu64
                            " local %" PRIu64 " registrations: %d\n",
                            block->index, chunk, (uint64_t) chunk_start,
                            (uint64_t) chunk_end, (uint64_t) host_addr,
                            (uint64_t) block->local_host_addr,
                            rdma->total_registrations);
            return -1;
        }
        rdma->total_registrations++;
    }

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}

/*
 * Register (at connection time) the memory used for control
 * channel messages.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    error_report("qemu_rdma_reg_control failed");
    return -1;
}

const char *print_wrid(int wrid)
{
    if (wrid >= RDMA_WRID_RECV_CONTROL) {
        return wrid_desc[RDMA_WRID_RECV_CONTROL];
    }
    return wrid_desc[wrid];
}

/*
 * RDMA requires memory registration (mlock/pinning), but this is not good for
 * overcommitment.
 *
 * In preparation for the future where LRU information or workload-specific
 * writable working set memory access behavior is available to QEMU
 * it would be nice to have in place the ability to UN-register/UN-pin
 * particular memory regions from the RDMA hardware when it is determined
 * that those regions of memory will likely not be accessed again in the
 * near future.
 *
 * While we do not yet have such information right now, the following
 * compile-time option allows us to perform a non-optimized version of this
 * behavior.
 *
 * By uncommenting this option, you will cause *all* RDMA transfers to be
 * unregistered immediately after the transfer completes on both sides of the
 * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
 *
 * This will have a terrible impact on migration performance, so until future
 * workload information or LRU information is available, do not attempt to use
 * this feature except for basic testing.
 */
//#define RDMA_UNREGISTRATION_EXAMPLE

/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
 *      - for bit clearing
 *      - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        trace_qemu_rdma_unregister_waiting_proc(chunk,
                                                rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }


        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's InfiniBand message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */
        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            trace_qemu_rdma_unregister_waiting_inflight(chunk);
            continue;
        }

        trace_qemu_rdma_unregister_waiting_send(chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(&reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        trace_qemu_rdma_unregister_waiting_complete(chunk);
    }

    return 0;
}

static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}
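
/*
 * Illustrative example (an assumption; it mirrors the decode helpers
 * sketched near the RDMA_WRID_* masks above):
 * qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, 3, 5) yields
 * (5UL << 30) | (3UL << 16) | 1, from which the block index and chunk
 * number can be recovered with RDMA_WRID_BLOCK_MASK and
 * RDMA_WRID_CHUNK_MASK when the completion is polled.
 */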

/*
 * Set bit for unregistration in the next iteration.
 * We cannot transmit right here, but will unpin later.
 */
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
                                        uint64_t chunk, uint64_t wr_id)
{
    if (rdma->unregistrations[rdma->unregister_next] != 0) {
        error_report("rdma migration: queue is full");
    } else {
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
            trace_qemu_rdma_signal_unregister_append(chunk,
                                                     rdma->unregister_next);

            rdma->unregistrations[rdma->unregister_next++] =
                    qemu_rdma_make_wrid(wr_id, index, chunk);

            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
                rdma->unregister_next = 0;
            }
        } else {
            trace_qemu_rdma_signal_unregister_already(chunk);
        }
    }
}

/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * Return the work request ID that completed.
 */
static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
                               uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(rdma->cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        error_report("ibv_poll_cq return %d", ret);
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL],
                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent,
                                   index, chunk, block->local_host_addr,
                                   (void *)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }

        if (!rdma->pin_all) {
            /*
             * FYI: If one wanted to signal a specific chunk to be unregistered
             * using LRU or workload-specific information, this is the function
             * you would call to do so. That chunk would then get asynchronously
             * unregistered later.
             */
#ifdef RDMA_UNREGISTRATION_EXAMPLE
            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
#endif
        }
    } else {
        trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}

/*
 * Block until the next work request has completed.
 *
 * First poll to see if a work request has already completed,
 * otherwise block.
 *
 * If we encounter completed work requests for IDs other than
 * the one we're interested in, then that's generally an error.
 *
 * The only exception is actual RDMA Write completions. These
 * completions only need to be recorded, but do not actually
 * need further processing.
 */
static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
                                    uint32_t *byte_len)
{
    int num_cq_events = 0, ret = 0;
    struct ibv_cq *cq;
    void *cq_ctx;
    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;

    if (ibv_req_notify_cq(rdma->cq, 0)) {
        return -1;
    }
    /* poll cq first */
    while (wr_id != wrid_requested) {
        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
        if (ret < 0) {
            return ret;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
        if (wr_id != wrid_requested) {
            trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                       wrid_requested, print_wrid(wr_id), wr_id);
        }
    }

    if (wr_id == wrid_requested) {
        return 0;
    }

    while (1) {
        /*
         * Coroutine doesn't start until process_incoming_migration()
         * so don't yield unless we know we're running inside of a coroutine.
         */
        if (rdma->migration_started_on_destination) {
            yield_until_fd_readable(rdma->comp_channel->fd);
        }

        if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) {
            perror("ibv_get_cq_event");
            goto err_block_for_wrid;
        }

        num_cq_events++;

        if (ibv_req_notify_cq(cq, 0)) {
            goto err_block_for_wrid;
        }

        while (wr_id != wrid_requested) {
            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
            if (ret < 0) {
                goto err_block_for_wrid;
            }

            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

            if (wr_id == RDMA_WRID_NONE) {
                break;
            }
            if (wr_id != wrid_requested) {
                trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                                   wrid_requested, print_wrid(wr_id), wr_id);
            }
        }

        if (wr_id == wrid_requested) {
            goto success_block_for_wrid;
        }
    }

success_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return 0;

err_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return ret;
}

/*
 * Post a SEND message work request for the control channel
 * containing some data and block until the post completes.
 */
static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
                                       RDMAControlHeader *head)
{
    int ret = 0;
    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
    struct ibv_send_wr *bad_wr;
    struct ibv_sge sge = {
                           .addr = (uint64_t)(wr->control),
                           .length = head->len + sizeof(RDMAControlHeader),
                           .lkey = wr->control_mr->lkey,
                         };
    struct ibv_send_wr send_wr = {
                                   .wr_id = RDMA_WRID_SEND_CONTROL,
                                   .opcode = IBV_WR_SEND,
                                   .send_flags = IBV_SEND_SIGNALED,
                                   .sg_list = &sge,
                                   .num_sge = 1,
                                 };

    trace_qemu_rdma_post_send_control(control_desc[head->type]);

    /*
     * We don't actually need to do a memcpy() in here if we used
     * the "sge" properly, but since we're only sending control messages
     * (not RAM in a performance-critical path), then it's OK for now.
     *
     * The copy makes the RDMAControlHeader simpler to manipulate
     * for the time being.
     */
    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
    memcpy(wr->control, head, sizeof(RDMAControlHeader));
    control_to_network((void *) wr->control);

    if (buf) {
        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
    }


    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret > 0) {
        error_report("Failed to use post IB SEND for control");
        return -ret;
    }

    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
    if (ret < 0) {
        error_report("rdma migration: send polling control error");
    }

    return ret;
}

/*
 * Post a RECV work request in anticipation of some future receipt
 * of data on the control channel.
 */
static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
{
    struct ibv_recv_wr *bad_wr;
    struct ibv_sge sge = {
                            .addr = (uint64_t)(rdma->wr_data[idx].control),
                            .length = RDMA_CONTROL_MAX_BUFFER,
                            .lkey = rdma->wr_data[idx].control_mr->lkey,
                         };

    struct ibv_recv_wr recv_wr = {
                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
                                    .sg_list = &sge,
                                    .num_sge = 1,
                                 };


    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
        return -1;
    }

    return 0;
}

/*
 * Block and wait for a RECV control channel message to arrive.
 */
static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
                RDMAControlHeader *head, int expecting, int idx)
{
    uint32_t byte_len;
    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
                                       &byte_len);

    if (ret < 0) {
        error_report("rdma migration: recv polling control error!");
        return ret;
    }

    network_to_control((void *) rdma->wr_data[idx].control);
    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));

    trace_qemu_rdma_exchange_get_response_start(control_desc[expecting]);

    if (expecting == RDMA_CONTROL_NONE) {
        trace_qemu_rdma_exchange_get_response_none(control_desc[head->type],
                                                   head->type);
    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
        error_report("Was expecting a %s (%d) control message"
                ", but got: %s (%d), length: %d",
                control_desc[expecting], expecting,
                control_desc[head->type], head->type, head->len);
        return -EIO;
    }
    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
        error_report("too long length: %d", head->len);
        return -EINVAL;
    }
    if (sizeof(*head) + head->len != byte_len) {
        error_report("Malformed length: %d byte_len %d", head->len, byte_len);
        return -EINVAL;
    }

    return 0;
}

/*
 * When a RECV work request has completed, the work request's
 * buffer is pointed at the header.
 *
 * This will advance the pointer to the data portion
 * of the control message of the work request's buffer that
 * was populated after the work request finished.
 */
static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
                                  RDMAControlHeader *head)
{
    rdma->wr_data[idx].control_len = head->len;
    rdma->wr_data[idx].control_curr =
        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
}

/*
 * This is an 'atomic' high-level operation to deliver a single, unified
 * control-channel message.
 *
 * Additionally, if the user is expecting some kind of reply to this message,
 * they can request a 'resp' response message be filled in by posting an
 * additional work request on behalf of the user and waiting for an additional
 * completion.
 *
 * The extra (optional) response is used during registration to save us from
 * having to perform an *additional* exchange of messages just to provide a
 * response, by instead piggy-backing on the acknowledgement.
 */
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma))
{
    int ret = 0;

    /*
     * Wait until the dest is ready before attempting to deliver the message
     * by waiting for a READY message.
     */
    if (rdma->control_ready_expected) {
        RDMAControlHeader resp;
        ret = qemu_rdma_exchange_get_response(rdma,
                                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
        if (ret < 0) {
            return ret;
        }
    }

    /*
     * If the user is expecting a response, post a WR in anticipation of it.
     */
    if (resp) {
        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
        if (ret) {
            error_report("rdma migration: error posting"
                    " extra control recv for anticipated result!");
            return ret;
        }
    }

    /*
     * Post a WR to replace the one we just consumed for the READY message.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting first control recv!");
        return ret;
    }

    /*
     * Deliver the control message that was requested.
     */
    ret = qemu_rdma_post_send_control(rdma, data, head);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * If we're expecting a response, block and wait for it.
     */
    if (resp) {
        if (callback) {
            trace_qemu_rdma_exchange_send_issue_callback();
            ret = callback(rdma);
            if (ret < 0) {
                return ret;
            }
        }

        trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]);
        ret = qemu_rdma_exchange_get_response(rdma, resp,
                                              resp->type, RDMA_WRID_DATA);

        if (ret < 0) {
            return ret;
        }

        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
        if (resp_idx) {
            *resp_idx = RDMA_WRID_DATA;
        }
        trace_qemu_rdma_exchange_send_received(control_desc[resp->type]);
    }

    rdma->control_ready_expected = 1;

    return 0;
}
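
/*
 * Illustrative usage sketch (an assumption added for clarity; it mirrors
 * how the unregistration path above drives this function): send an
 * UNREGISTER_REQUEST carrying an RDMARegister payload and block until the
 * piggy-backed UNREGISTER_FINISHED response arrives. The 'index' and
 * 'chunk' values stand in for whatever the caller computed.
 */
#if 0
    int ret;
    RDMARegister reg = { .current_index = index };
    RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED };
    RDMAControlHeader head = { .len = sizeof(RDMARegister),
                               .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                               .repeat = 1 };

    reg.key.chunk = chunk;
    register_to_network(&reg);
    ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                  &resp, NULL, NULL);
#endif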

/*
 * This is an 'atomic' high-level operation to receive a single, unified
 * control-channel message.
 */
static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
                                   int expecting)
{
    RDMAControlHeader ready = {
                                .len = 0,
                                .type = RDMA_CONTROL_READY,
                                .repeat = 1,
                              };
    int ret;

    /*
     * Inform the source that we're ready to receive a message.
     */
    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * Block and wait for the message.
     */
    ret = qemu_rdma_exchange_get_response(rdma, head,
                                          expecting, RDMA_WRID_READY);

    if (ret < 0) {
        return ret;
    }

    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);

    /*
     * Post a new RECV work request to replace the one we just consumed.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv!");
        return ret;
    }

    return 0;
}
1799
1800/*
1801 * Write an actual chunk of memory using RDMA.
1802 *
1803 * If we're using dynamic registration on the dest-side, we have to
1804 * send a registration command first.
1805 */
1806static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
1807 int current_index, uint64_t current_addr,
1808 uint64_t length)
1809{
1810 struct ibv_sge sge;
1811 struct ibv_send_wr send_wr = { 0 };
1812 struct ibv_send_wr *bad_wr;
1813 int reg_result_idx, ret, count = 0;
1814 uint64_t chunk, chunks;
1815 uint8_t *chunk_start, *chunk_end;
1816 RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
1817 RDMARegister reg;
1818 RDMARegisterResult *reg_result;
1819 RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
1820 RDMAControlHeader head = { .len = sizeof(RDMARegister),
1821 .type = RDMA_CONTROL_REGISTER_REQUEST,
1822 .repeat = 1,
1823 };
1824
1825retry:
1826 sge.addr = (uint64_t)(block->local_host_addr +
1827 (current_addr - block->offset));
1828 sge.length = length;
1829
1830 chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr);
1831 chunk_start = ram_chunk_start(block, chunk);
1832
1833 if (block->is_ram_block) {
1834 chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
1835
1836 if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
1837 chunks--;
1838 }
1839 } else {
1840 chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
1841
1842 if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
1843 chunks--;
1844 }
1845 }
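    /*
     * Worked example (illustrative): with RDMA_REG_CHUNK_SHIFT = 20 (1 MB
     * chunks) and length = 2 MB, chunks starts at 2 and is decremented to 1
     * because the length is an exact multiple, so the inclusive range
     * [chunk, chunk + chunks] still spans exactly two chunks.
     */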
1846
1847 trace_qemu_rdma_write_one_top(chunks + 1,
1848 (chunks + 1) *
1849 (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
1850
1851 chunk_end = ram_chunk_end(block, chunk + chunks);
1852
1853 if (!rdma->pin_all) {
1854#ifdef RDMA_UNREGISTRATION_EXAMPLE
1855 qemu_rdma_unregister_waiting(rdma);
1856#endif
1857 }
1858
1859 while (test_bit(chunk, block->transit_bitmap)) {
1860 (void)count;
733252de 1861 trace_qemu_rdma_write_one_block(count++, current_index, chunk,
1862 sge.addr, length, rdma->nb_sent, block->nb_chunks);
1863
88571882 1864 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
1865
1866 if (ret < 0) {
733252de 1867 error_report("Failed to wait for previous write to complete "
2da776db 1868 "block %d chunk %" PRIu64
733252de 1869 " current %" PRIu64 " len %" PRIu64 " %d",
1870 current_index, chunk, sge.addr, length, rdma->nb_sent);
1871 return ret;
1872 }
1873 }
1874
1875 if (!rdma->pin_all || !block->is_ram_block) {
1876 if (!block->remote_keys[chunk]) {
1877 /*
1878 * This chunk has not yet been registered, so first check to see
1879 * if the entire chunk is zero. If so, tell the other side to
1880 * memset() + madvise() the entire chunk without RDMA.
1881 */
1882
1883 if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length)
1884 && buffer_find_nonzero_offset((void *)sge.addr,
1885 length) == length) {
1886 RDMACompress comp = {
1887 .offset = current_addr,
1888 .value = 0,
1889 .block_idx = current_index,
1890 .length = length,
1891 };
1892
1893 head.len = sizeof(comp);
1894 head.type = RDMA_CONTROL_COMPRESS;
1895
1896 trace_qemu_rdma_write_one_zero(chunk, sge.length,
1897 current_index, current_addr);
1898
1899 compress_to_network(&comp);
1900 ret = qemu_rdma_exchange_send(rdma, &head,
1901 (uint8_t *) &comp, NULL, NULL, NULL);
1902
1903 if (ret < 0) {
1904 return -EIO;
1905 }
1906
1907 acct_update_position(f, sge.length, true);
1908
1909 return 1;
1910 }
1911
1912 /*
1913 * Otherwise, tell other side to register.
1914 */
1915 reg.current_index = current_index;
1916 if (block->is_ram_block) {
1917 reg.key.current_addr = current_addr;
1918 } else {
1919 reg.key.chunk = chunk;
1920 }
1921 reg.chunks = chunks;
1922
1923 trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
1924 current_addr);
1925
1926 register_to_network(&reg);
1927 ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
1928 &resp, &reg_result_idx, NULL);
1929 if (ret < 0) {
1930 return ret;
1931 }
1932
1933 /* try to overlap this single registration with the one we sent. */
1934 if (qemu_rdma_register_and_get_keys(rdma, block,
1935 (uint8_t *) sge.addr,
1936 &sge.lkey, NULL, chunk,
1937 chunk_start, chunk_end)) {
733252de 1938 error_report("cannot get lkey");
1939 return -EINVAL;
1940 }
1941
1942 reg_result = (RDMARegisterResult *)
1943 rdma->wr_data[reg_result_idx].control_curr;
1944
1945 network_to_result(reg_result);
1946
1947 trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
1948 reg_result->rkey, chunk);
1949
1950 block->remote_keys[chunk] = reg_result->rkey;
1951 block->remote_host_addr = reg_result->host_addr;
1952 } else {
1953 /* already registered before */
1954 if (qemu_rdma_register_and_get_keys(rdma, block,
1955 (uint8_t *)sge.addr,
1956 &sge.lkey, NULL, chunk,
1957 chunk_start, chunk_end)) {
733252de 1958 error_report("cannot get lkey!");
1959 return -EINVAL;
1960 }
1961 }
1962
1963 send_wr.wr.rdma.rkey = block->remote_keys[chunk];
1964 } else {
1965 send_wr.wr.rdma.rkey = block->remote_rkey;
1966
1967 if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
1968 &sge.lkey, NULL, chunk,
1969 chunk_start, chunk_end)) {
733252de 1970 error_report("cannot get lkey!");
1971 return -EINVAL;
1972 }
1973 }
1974
1975 /*
1976 * Encode the ram block index and chunk within this wrid.
1977 * We will use this information at the time of completion
1978 * to figure out which bitmap to check against and then which
1979 * chunk in the bitmap to look for.
1980 */
1981 send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
1982 current_index, chunk);
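    /*
     * For illustration: the completion path undoes this encoding with the
     * masks defined at the top of the file, e.g.
     *   index = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
     *   chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
     */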
1983
1984 send_wr.opcode = IBV_WR_RDMA_WRITE;
1985 send_wr.send_flags = IBV_SEND_SIGNALED;
1986 send_wr.sg_list = &sge;
1987 send_wr.num_sge = 1;
1988 send_wr.wr.rdma.remote_addr = block->remote_host_addr +
1989 (current_addr - block->offset);
1990
1991 trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
1992 sge.length);
1993
1994 /*
1995 * ibv_post_send() does not return negative error numbers;
1996 * per the specification they are positive - no idea why.
1997 */
1998 ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
1999
2000 if (ret == ENOMEM) {
733252de 2001 trace_qemu_rdma_write_one_queue_full();
88571882 2002 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2da776db 2003 if (ret < 0) {
2004 error_report("rdma migration: failed to make "
2005 "room in full send queue! %d", ret);
2006 return ret;
2007 }
2008
2009 goto retry;
2010
2011 } else if (ret > 0) {
2012 perror("rdma migration: post rdma write failed");
2013 return -ret;
2014 }
2015
2016 set_bit(chunk, block->transit_bitmap);
2017 acct_update_position(f, sge.length, false);
2018 rdma->total_writes++;
2019
2020 return 0;
2021}
2022
2023/*
2024 * Push out any unwritten RDMA operations.
2025 *
2026 * We support sending out multiple chunks at the same time.
2027 * Not all of them need to get signaled in the completion queue.
2028 */
2029static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
2030{
2031 int ret;
2032
2033 if (!rdma->current_length) {
2034 return 0;
2035 }
2036
2037 ret = qemu_rdma_write_one(f, rdma,
2038 rdma->current_index, rdma->current_addr, rdma->current_length);
2039
2040 if (ret < 0) {
2041 return ret;
2042 }
2043
2044 if (ret == 0) {
2045 rdma->nb_sent++;
733252de 2046 trace_qemu_rdma_write_flush(rdma->nb_sent);
2047 }
2048
2049 rdma->current_length = 0;
2050 rdma->current_addr = 0;
2051
2052 return 0;
2053}
2054
2055static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
2056 uint64_t offset, uint64_t len)
2057{
2058 RDMALocalBlock *block;
2059 uint8_t *host_addr;
2060 uint8_t *chunk_end;
2061
2062 if (rdma->current_index < 0) {
2063 return 0;
2064 }
2065
2066 if (rdma->current_chunk < 0) {
2067 return 0;
2068 }
2069
2070 block = &(rdma->local_ram_blocks.block[rdma->current_index]);
2071 host_addr = block->local_host_addr + (offset - block->offset);
2072 chunk_end = ram_chunk_end(block, rdma->current_chunk);
2073
2074 if (rdma->current_length == 0) {
2075 return 0;
2076 }
2077
2078 /*
2079 * Only merge into chunk sequentially.
2080 */
2081 if (offset != (rdma->current_addr + rdma->current_length)) {
2082 return 0;
2083 }
2084
2085 if (offset < block->offset) {
2086 return 0;
2087 }
2088
2089 if ((offset + len) > (block->offset + block->length)) {
2090 return 0;
2091 }
2092
2093 if ((host_addr + len) > chunk_end) {
2094 return 0;
2095 }
2096
2097 return 1;
2098}
2099
2100/*
2101 * We're not actually writing here, but doing three things:
2102 *
2103 * 1. Identify the chunk the buffer belongs to.
2104 * 2. If the chunk is full or the buffer doesn't belong to the current
2105 * chunk, then start a new chunk and flush() the old chunk.
2106 * 3. To keep the hardware busy, we also group chunks into batches
2107 * and only require that a batch gets acknowledged in the completion
2108 * queue instead of each individual chunk.
2109 */
2110static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
2111 uint64_t block_offset, uint64_t offset,
2112 uint64_t len)
2113{
2114 uint64_t current_addr = block_offset + offset;
2115 uint64_t index = rdma->current_index;
2116 uint64_t chunk = rdma->current_chunk;
2117 int ret;
2118
2119 /* If we cannot merge it, we flush the current buffer first. */
2120 if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
2121 ret = qemu_rdma_write_flush(f, rdma);
2122 if (ret) {
2123 return ret;
2124 }
2125 rdma->current_length = 0;
2126 rdma->current_addr = current_addr;
2127
2128 ret = qemu_rdma_search_ram_block(rdma, block_offset,
2129 offset, len, &index, &chunk);
2130 if (ret) {
733252de 2131 error_report("ram block search failed");
2132 return ret;
2133 }
2134 rdma->current_index = index;
2135 rdma->current_chunk = chunk;
2136 }
2137
2138 /* merge it */
2139 rdma->current_length += len;
2140
2141 /* flush it if buffer is too large */
2142 if (rdma->current_length >= RDMA_MERGE_MAX) {
2143 return qemu_rdma_write_flush(f, rdma);
2144 }
2145
2146 return 0;
2147}
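
/*
 * Merge behaviour in miniature (illustrative only; example_merge() and the
 * page-sized values are assumptions, and error checking is elided).  Two
 * virtually contiguous writes coalesce into one pending buffer, and the
 * flush issues a single qemu_rdma_write_one() for the combined length.
 */
#if 0
static int example_merge(QEMUFile *f, RDMAContext *rdma, uint64_t block_offset)
{
    qemu_rdma_write(f, rdma, block_offset, 0,    4096); /* starts a buffer  */
    qemu_rdma_write(f, rdma, block_offset, 4096, 4096); /* merges: adjacent */
    return qemu_rdma_write_flush(f, rdma);    /* one 8192-byte RDMA write */
}
#endif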
2148
2149static void qemu_rdma_cleanup(RDMAContext *rdma)
2150{
2151 struct rdma_cm_event *cm_event;
2152 int ret, idx;
2153
5a91337c 2154 if (rdma->cm_id && rdma->connected) {
2155 if (rdma->error_state) {
2156 RDMAControlHeader head = { .len = 0,
2157 .type = RDMA_CONTROL_ERROR,
2158 .repeat = 1,
2159 };
733252de 2160 error_report("Early error. Sending error.");
2161 qemu_rdma_post_send_control(rdma, NULL, &head);
2162 }
2163
2164 ret = rdma_disconnect(rdma->cm_id);
2165 if (!ret) {
733252de 2166 trace_qemu_rdma_cleanup_waiting_for_disconnect();
2167 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2168 if (!ret) {
2169 rdma_ack_cm_event(cm_event);
2170 }
2171 }
733252de 2172 trace_qemu_rdma_cleanup_disconnect();
5a91337c 2173 rdma->connected = false;
2174 }
2175
2176 g_free(rdma->block);
2177 rdma->block = NULL;
2178
1f22364b 2179 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2180 if (rdma->wr_data[idx].control_mr) {
2181 rdma->total_registrations--;
2182 ibv_dereg_mr(rdma->wr_data[idx].control_mr);
2183 }
2184 rdma->wr_data[idx].control_mr = NULL;
2185 }
2186
2187 if (rdma->local_ram_blocks.block) {
2188 while (rdma->local_ram_blocks.nb_blocks) {
ba795761 2189 rdma_delete_block(rdma, rdma->local_ram_blocks.block->offset);
2190 }
2191 }
2192
2193 if (rdma->cq) {
2194 ibv_destroy_cq(rdma->cq);
2195 rdma->cq = NULL;
2196 }
2197 if (rdma->comp_channel) {
2198 ibv_destroy_comp_channel(rdma->comp_channel);
2199 rdma->comp_channel = NULL;
2200 }
2201 if (rdma->pd) {
2202 ibv_dealloc_pd(rdma->pd);
2203 rdma->pd = NULL;
2204 }
2205 if (rdma->listen_id) {
2206 rdma_destroy_id(rdma->listen_id);
2207 rdma->listen_id = NULL;
2208 }
2209 if (rdma->cm_id) {
2210 if (rdma->qp) {
2211 rdma_destroy_qp(rdma->cm_id);
2212 rdma->qp = NULL;
2213 }
2214 rdma_destroy_id(rdma->cm_id);
2215 rdma->cm_id = NULL;
2216 }
2217 if (rdma->channel) {
2218 rdma_destroy_event_channel(rdma->channel);
2219 rdma->channel = NULL;
2220 }
2221 g_free(rdma->host);
2222 rdma->host = NULL;
2223}
2224
2225
2226static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all)
2227{
2228 int ret, idx;
2229 Error *local_err = NULL, **temp = &local_err;
2230
2231 /*
2232 * Will be validated against destination's actual capabilities
2233 * after the connect() completes.
2234 */
2235 rdma->pin_all = pin_all;
2236
2237 ret = qemu_rdma_resolve_host(rdma, temp);
2238 if (ret) {
2239 goto err_rdma_source_init;
2240 }
2241
2242 ret = qemu_rdma_alloc_pd_cq(rdma);
2243 if (ret) {
2244 ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
2245 " limits may be too low. Please check $ ulimit -a # and "
66988941 2246 "search for 'ulimit -l' in the output");
2247 goto err_rdma_source_init;
2248 }
2249
2250 ret = qemu_rdma_alloc_qp(rdma);
2251 if (ret) {
66988941 2252 ERROR(temp, "rdma migration: error allocating qp!");
2253 goto err_rdma_source_init;
2254 }
2255
2256 ret = qemu_rdma_init_ram_blocks(rdma);
2257 if (ret) {
66988941 2258 ERROR(temp, "rdma migration: error initializing ram blocks!");
2259 goto err_rdma_source_init;
2260 }
2261
1f22364b 2262 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2263 ret = qemu_rdma_reg_control(rdma, idx);
2264 if (ret) {
66988941 2265 ERROR(temp, "rdma migration: error registering %d control!",
2266 idx);
2267 goto err_rdma_source_init;
2268 }
2269 }
2270
2271 return 0;
2272
2273err_rdma_source_init:
2274 error_propagate(errp, local_err);
2275 qemu_rdma_cleanup(rdma);
2276 return -1;
2277}
2278
2279static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
2280{
2281 RDMACapabilities cap = {
2282 .version = RDMA_CONTROL_VERSION_CURRENT,
2283 .flags = 0,
2284 };
2285 struct rdma_conn_param conn_param = { .initiator_depth = 2,
2286 .retry_count = 5,
2287 .private_data = &cap,
2288 .private_data_len = sizeof(cap),
2289 };
2290 struct rdma_cm_event *cm_event;
2291 int ret;
2292
2293 /*
2294 * Only negotiate the capability with destination if the user
2295 * on the source first requested the capability.
2296 */
2297 if (rdma->pin_all) {
733252de 2298 trace_qemu_rdma_connect_pin_all_requested();
2299 cap.flags |= RDMA_CAPABILITY_PIN_ALL;
2300 }
2301
2302 caps_to_network(&cap);
2303
2304 ret = rdma_connect(rdma->cm_id, &conn_param);
2305 if (ret) {
2306 perror("rdma_connect");
66988941 2307 ERROR(errp, "connecting to destination!");
2308 rdma_destroy_id(rdma->cm_id);
2309 rdma->cm_id = NULL;
2310 goto err_rdma_source_connect;
2311 }
2312
2313 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2314 if (ret) {
2315 perror("rdma_get_cm_event after rdma_connect");
66988941 2316 ERROR(errp, "connecting to destination!");
2317 rdma_ack_cm_event(cm_event);
2318 rdma_destroy_id(rdma->cm_id);
2319 rdma->cm_id = NULL;
2320 goto err_rdma_source_connect;
2321 }
2322
2323 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
2324 perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
66988941 2325 ERROR(errp, "connecting to destination!");
2326 rdma_ack_cm_event(cm_event);
2327 rdma_destroy_id(rdma->cm_id);
2328 rdma->cm_id = NULL;
2329 goto err_rdma_source_connect;
2330 }
5a91337c 2331 rdma->connected = true;
2332
2333 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
2334 network_to_caps(&cap);
2335
2336 /*
2337 * Verify that the *requested* capabilities are supported by the destination
2338 * and disable them otherwise.
2339 */
2340 if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
2341 ERROR(errp, "Server cannot support pinning all memory. "
66988941 2342 "Will register memory dynamically.");
2343 rdma->pin_all = false;
2344 }
2345
733252de 2346 trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);
2347
2348 rdma_ack_cm_event(cm_event);
2349
87772639 2350 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
2da776db 2351 if (ret) {
66988941 2352 ERROR(errp, "posting second control recv!");
2353 goto err_rdma_source_connect;
2354 }
2355
2356 rdma->control_ready_expected = 1;
2357 rdma->nb_sent = 0;
2358 return 0;
2359
2360err_rdma_source_connect:
2361 qemu_rdma_cleanup(rdma);
2362 return -1;
2363}
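
/*
 * The capability negotiation above, reduced to its arithmetic (illustrative
 * fragment, not compiled): the source requests a set of flags, the
 * destination masks them with known_capabilities in qemu_rdma_accept(), and
 * the source drops whatever did not survive the round trip.
 */
#if 0
    uint32_t requested = RDMA_CAPABILITY_PIN_ALL;            /* source asks  */
    uint32_t granted = requested & known_capabilities;       /* dest filters */
    bool pin_all = (granted & RDMA_CAPABILITY_PIN_ALL) != 0; /* source obeys */
#endif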
2364
2365static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
2366{
2367 int ret = -EINVAL, idx;
2368 struct rdma_cm_id *listen_id;
2369 char ip[40] = "unknown";
7fc5b13f 2370 struct rdma_addrinfo *res;
b58c8552 2371 char port_str[16];
2da776db 2372
1f22364b 2373 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2374 rdma->wr_data[idx].control_len = 0;
2375 rdma->wr_data[idx].control_curr = NULL;
2376 }
2377
2378 if (rdma->host == NULL) {
66988941 2379 ERROR(errp, "RDMA host is not set!");
2380 rdma->error_state = -EINVAL;
2381 return -1;
2382 }
2383 /* create CM channel */
2384 rdma->channel = rdma_create_event_channel();
2385 if (!rdma->channel) {
66988941 2386 ERROR(errp, "could not create rdma event channel");
2387 rdma->error_state = -EINVAL;
2388 return -1;
2389 }
2390
2391 /* create CM id */
2392 ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
2393 if (ret) {
66988941 2394 ERROR(errp, "could not create cm_id!");
2395 goto err_dest_init_create_listen_id;
2396 }
2397
2398 snprintf(port_str, 16, "%d", rdma->port);
2399 port_str[15] = '\0';
2400
2401 if (rdma->host && strcmp("", rdma->host)) {
7fc5b13f 2402 struct rdma_addrinfo *e;
6470215b 2403
7fc5b13f 2404 ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
b58c8552 2405 if (ret < 0) {
7fc5b13f 2406 ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
2407 goto err_dest_init_bind_addr;
2408 }
b58c8552 2409
2410 for (e = res; e != NULL; e = e->ai_next) {
2411 inet_ntop(e->ai_family,
7fc5b13f 2412 &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
733252de 2413 trace_qemu_rdma_dest_init_trying(rdma->host, ip);
7fc5b13f 2414 ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
6470215b 2415 if (!ret) {
2416 if (e->ai_family == AF_INET6) {
2417 ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs);
2418 if (ret) {
2419 continue;
2420 }
2421 }
2422
2423 goto listen;
2424 }
2425 }
b58c8552 2426
2427 ERROR(errp, "Error: could not rdma_bind_addr!");
2428 goto err_dest_init_bind_addr;
2da776db 2429 } else {
66988941 2430 ERROR(errp, "migration host and port not specified!");
2431 ret = -EINVAL;
2432 goto err_dest_init_bind_addr;
2da776db 2433 }
6470215b 2434listen:
2435
2436 rdma->listen_id = listen_id;
2437 qemu_rdma_dump_gid("dest_init", listen_id);
2438 return 0;
2439
2440err_dest_init_bind_addr:
2441 rdma_destroy_id(listen_id);
2442err_dest_init_create_listen_id:
2443 rdma_destroy_event_channel(rdma->channel);
2444 rdma->channel = NULL;
2445 rdma->error_state = ret;
2446 return ret;
2447
2448}
2449
2450static void *qemu_rdma_data_init(const char *host_port, Error **errp)
2451{
2452 RDMAContext *rdma = NULL;
2453 InetSocketAddress *addr;
2454
2455 if (host_port) {
2456 rdma = g_malloc0(sizeof(RDMAContext));
2458 rdma->current_index = -1;
2459 rdma->current_chunk = -1;
2460
2461 addr = inet_parse(host_port, NULL);
2462 if (addr != NULL) {
2463 rdma->port = atoi(addr->port);
2464 rdma->host = g_strdup(addr->host);
2465 } else {
2466 ERROR(errp, "bad RDMA migration address '%s'", host_port);
2467 g_free(rdma);
e325b49a 2468 rdma = NULL;
2da776db 2469 }
2470
2471 qapi_free_InetSocketAddress(addr);
2472 }
2473
2474 return rdma;
2475}
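
/*
 * Usage sketch (the address below is a placeholder, not a recommendation):
 *
 *   Error *err = NULL;
 *   RDMAContext *rdma = qemu_rdma_data_init("192.168.1.1:4444", &err);
 *
 * On a malformed address the function reports through @errp and returns
 * NULL, so callers only need a NULL check.
 */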
2476
2477/*
2478 * QEMUFile interface to the control channel.
2479 * SEND messages for control only.
971ae6ef 2480 * VM's ram is handled with regular RDMA messages.
2481 */
2482static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
2483 int64_t pos, int size)
2484{
2485 QEMUFileRDMA *r = opaque;
2486 QEMUFile *f = r->file;
2487 RDMAContext *rdma = r->rdma;
2488 size_t remaining = size;
2489 uint8_t * data = (void *) buf;
2490 int ret;
2491
2492 CHECK_ERROR_STATE();
2493
2494 /*
2495 * Push out any writes that
971ae6ef 2496 * we've queued up for the VM's ram.
2497 */
2498 ret = qemu_rdma_write_flush(f, rdma);
2499 if (ret < 0) {
2500 rdma->error_state = ret;
2501 return ret;
2502 }
2503
2504 while (remaining) {
2505 RDMAControlHeader head;
2506
2507 r->len = MIN(remaining, RDMA_SEND_INCREMENT);
2508 remaining -= r->len;
2509
2510 head.len = r->len;
2511 head.type = RDMA_CONTROL_QEMU_FILE;
2512
2513 ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
2514
2515 if (ret < 0) {
2516 rdma->error_state = ret;
2517 return ret;
2518 }
2519
2520 data += r->len;
2521 }
2522
2523 return size;
2524}
2525
2526static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
2527 int size, int idx)
2528{
2529 size_t len = 0;
2530
2531 if (rdma->wr_data[idx].control_len) {
733252de 2532 trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);
2533
2534 len = MIN(size, rdma->wr_data[idx].control_len);
2535 memcpy(buf, rdma->wr_data[idx].control_curr, len);
2536 rdma->wr_data[idx].control_curr += len;
2537 rdma->wr_data[idx].control_len -= len;
2538 }
2539
2540 return len;
2541}
2542
2543/*
2544 * QEMUFile interface to the control channel.
2545 * RDMA links don't use bytestreams, so we have to
2546 * return bytes to QEMUFile opportunistically.
2547 */
2548static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf,
2549 int64_t pos, int size)
2550{
2551 QEMUFileRDMA *r = opaque;
2552 RDMAContext *rdma = r->rdma;
2553 RDMAControlHeader head;
2554 int ret = 0;
2555
2556 CHECK_ERROR_STATE();
2557
2558 /*
2559 * First, we hold on to the last SEND message we
2560 * were given and dish out the bytes until we run
2561 * out of bytes.
2562 */
2563 r->len = qemu_rdma_fill(r->rdma, buf, size, 0);
2564 if (r->len) {
2565 return r->len;
2566 }
2567
2568 /*
2569 * Once we run out, we block and wait for another
2570 * SEND message to arrive.
2571 */
2572 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);
2573
2574 if (ret < 0) {
2575 rdma->error_state = ret;
2576 return ret;
2577 }
2578
2579 /*
2580 * SEND was received with new bytes, now try again.
2581 */
2582 return qemu_rdma_fill(r->rdma, buf, size, 0);
2583}
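
/*
 * Flow recap (illustrative): if the peer delivered a 32 KB QEMU_FILE
 * message in one SEND, the first qemu_rdma_get_buffer() call blocks in
 * qemu_rdma_exchange_recv() and subsequent calls drain the cached control
 * buffer through qemu_rdma_fill() without touching the wire.
 */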
2584
2585/*
2586 * Block until all the outstanding chunks have been delivered by the hardware.
2587 */
2588static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
2589{
2590 int ret;
2591
2592 if (qemu_rdma_write_flush(f, rdma) < 0) {
2593 return -EIO;
2594 }
2595
2596 while (rdma->nb_sent) {
88571882 2597 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2da776db 2598 if (ret < 0) {
733252de 2599 error_report("rdma migration: complete polling error!");
2600 return -EIO;
2601 }
2602 }
2603
2604 qemu_rdma_unregister_waiting(rdma);
2605
2606 return 0;
2607}
2608
2609static int qemu_rdma_close(void *opaque)
2610{
733252de 2611 trace_qemu_rdma_close();
2612 QEMUFileRDMA *r = opaque;
2613 if (r->rdma) {
2614 qemu_rdma_cleanup(r->rdma);
2615 g_free(r->rdma);
2616 }
2617 g_free(r);
2618 return 0;
2619}
2620
2621/*
2622 * Parameters:
2623 * @offset == 0 :
2624 * This means that 'block_offset' is a full virtual address that does not
2625 * belong to a RAMBlock of the virtual machine and instead
2626 * represents a private malloc'd memory area that the caller wishes to
2627 * transfer.
2628 *
2629 * @offset != 0 :
2630 * Offset is an offset to be added to block_offset and used
2631 * to also lookup the corresponding RAMBlock.
2632 *
2633 * @size > 0 :
2634 * Initiate a transfer of this size.
2635 *
2636 * @size == 0 :
2637 * A 'hint' or 'advice' that means that we wish to speculatively
2638 * and asynchronously unregister this memory. In this case, there is no
52f35022 2639 * guarantee that the unregister will actually happen, for example,
2640 * if the memory is being actively transmitted. Additionally, the memory
2641 * may be re-registered at any future time if a write within the same
2642 * chunk was requested again, even if you attempted to unregister it
2643 * here.
2644 *
2645 * @size < 0 : TODO, not yet supported
2646 * Unregister the memory NOW. This means that the caller does not
2647 * expect there to be any future RDMA transfers and we just want to clean
2648 * things up. This is used in case the upper layer owns the memory and
2649 * cannot wait for qemu_fclose() to occur.
2650 *
2651 * @bytes_sent : User-specified pointer to indicate how many bytes were
2652 * sent. Usually, this will not be more than a few bytes of
2653 * the protocol because most transfers are sent asynchronously.
2654 */
2655static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
2656 ram_addr_t block_offset, ram_addr_t offset,
6e1dea46 2657 size_t size, uint64_t *bytes_sent)
2658{
2659 QEMUFileRDMA *rfile = opaque;
2660 RDMAContext *rdma = rfile->rdma;
2661 int ret;
2662
2663 CHECK_ERROR_STATE();
2664
2665 qemu_fflush(f);
2666
2667 if (size > 0) {
2668 /*
2669 * Add this page to the current 'chunk'. If the chunk
2670 * is full, or the page doesn't belong to the current chunk,
2671 * an actual RDMA write will occur and a new chunk will be formed.
2672 */
2673 ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
2674 if (ret < 0) {
733252de 2675 error_report("rdma migration: write error! %d", ret);
2676 goto err;
2677 }
2678
2679 /*
2680 * We always return 1 byte because the RDMA
2681 * protocol is completely asynchronous. We do not yet know
2682 * whether an identified chunk is zero or not because we're
2683 * waiting for other pages to potentially be merged with
2684 * the current chunk. So, we have to call qemu_update_position()
2685 * later on when the actual write occurs.
2686 */
2687 if (bytes_sent) {
2688 *bytes_sent = 1;
2689 }
2690 } else {
2691 uint64_t index, chunk;
2692
2693 /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
2694 if (size < 0) {
2695 ret = qemu_rdma_drain_cq(f, rdma);
2696 if (ret < 0) {
2697 fprintf(stderr, "rdma: failed to synchronously drain"
2698 " completion queue before unregistration.\n");
2699 goto err;
2700 }
2701 }
2702 */
2703
2704 ret = qemu_rdma_search_ram_block(rdma, block_offset,
2705 offset, size, &index, &chunk);
2706
2707 if (ret) {
733252de 2708 error_report("ram block search failed");
2709 goto err;
2710 }
2711
2712 qemu_rdma_signal_unregister(rdma, index, chunk, 0);
2713
2714 /*
52f35022 2715 * TODO: Synchronous, guaranteed unregistration (should not occur during
2716 * fast-path). Otherwise, unregisters will process on the next call to
2717 * qemu_rdma_drain_cq()
2718 if (size < 0) {
2719 qemu_rdma_unregister_waiting(rdma);
2720 }
2721 */
2722 }
2723
2724 /*
2725 * Drain the Completion Queue if possible, but do not block,
2726 * just poll.
2727 *
2728 * If nothing to poll, the end of the iteration will do this
2729 * again to make sure we don't overflow the request queue.
2730 */
2731 while (1) {
2732 uint64_t wr_id, wr_id_in;
88571882 2733 int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
2da776db 2734 if (ret < 0) {
733252de 2735 error_report("rdma migration: polling error! %d", ret);
2736 goto err;
2737 }
2738
2739 wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
2740
2741 if (wr_id == RDMA_WRID_NONE) {
2742 break;
2743 }
2744 }
2745
2746 return RAM_SAVE_CONTROL_DELAYED;
2747err:
2748 rdma->error_state = ret;
2749 return ret;
2750}
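
/*
 * Caller-side sketch (illustrative; example_save_one_page() is a
 * hypothetical helper, the real caller being the generic save_page hook
 * machinery).  RAM_SAVE_CONTROL_DELAYED means "accounted for later",
 * because the zero/merge decision above happens asynchronously.
 */
#if 0
static void example_save_one_page(QEMUFile *f, void *opaque,
                                  ram_addr_t block_offset, ram_addr_t offset)
{
    uint64_t bytes_sent = 0;
    size_t ret = qemu_rdma_save_page(f, opaque, block_offset, offset,
                                     TARGET_PAGE_SIZE, &bytes_sent);

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        /* bytes_sent == 1; real accounting happens when the chunk flushes */
    }
}
#endif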
2751
2752static int qemu_rdma_accept(RDMAContext *rdma)
2753{
2754 RDMACapabilities cap;
2755 struct rdma_conn_param conn_param = {
2756 .responder_resources = 2,
2757 .private_data = &cap,
2758 .private_data_len = sizeof(cap),
2759 };
2760 struct rdma_cm_event *cm_event;
2761 struct ibv_context *verbs;
2762 int ret = -EINVAL;
2763 int idx;
2764
2765 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2766 if (ret) {
2767 goto err_rdma_dest_wait;
2768 }
2769
2770 if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
2771 rdma_ack_cm_event(cm_event);
2772 goto err_rdma_dest_wait;
2773 }
2774
2775 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
2776
2777 network_to_caps(&cap);
2778
2779 if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
733252de 2780 error_report("Unknown source RDMA version: %d, bailing...",
2781 cap.version);
2782 rdma_ack_cm_event(cm_event);
2783 goto err_rdma_dest_wait;
2784 }
2785
2786 /*
2787 * Respond with only the capabilities this version of QEMU knows about.
2788 */
2789 cap.flags &= known_capabilities;
2790
2791 /*
2792 * Enable the ones that we do know about.
2793 * Add other checks here as new ones are introduced.
2794 */
2795 if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
2796 rdma->pin_all = true;
2797 }
2798
2799 rdma->cm_id = cm_event->id;
2800 verbs = cm_event->id->verbs;
2801
2802 rdma_ack_cm_event(cm_event);
2803
733252de 2804 trace_qemu_rdma_accept_pin_state(rdma->pin_all);
2805
2806 caps_to_network(&cap);
2807
733252de 2808 trace_qemu_rdma_accept_pin_verbsc(verbs);
2809
2810 if (!rdma->verbs) {
2811 rdma->verbs = verbs;
2812 } else if (rdma->verbs != verbs) {
2813 error_report("ibv context not matching %p, %p!", rdma->verbs,
2814 verbs);
2815 goto err_rdma_dest_wait;
2816 }
2817
2818 qemu_rdma_dump_id("dest_init", verbs);
2819
2820 ret = qemu_rdma_alloc_pd_cq(rdma);
2821 if (ret) {
733252de 2822 error_report("rdma migration: error allocating pd and cq!");
2823 goto err_rdma_dest_wait;
2824 }
2825
2826 ret = qemu_rdma_alloc_qp(rdma);
2827 if (ret) {
733252de 2828 error_report("rdma migration: error allocating qp!");
2829 goto err_rdma_dest_wait;
2830 }
2831
2832 ret = qemu_rdma_init_ram_blocks(rdma);
2833 if (ret) {
733252de 2834 error_report("rdma migration: error initializing ram blocks!");
2835 goto err_rdma_dest_wait;
2836 }
2837
1f22364b 2838 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2839 ret = qemu_rdma_reg_control(rdma, idx);
2840 if (ret) {
733252de 2841 error_report("rdma: error registering %d control", idx);
2842 goto err_rdma_dest_wait;
2843 }
2844 }
2845
2846 qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL);
2847
2848 ret = rdma_accept(rdma->cm_id, &conn_param);
2849 if (ret) {
733252de 2850 error_report("rdma_accept returns %d", ret);
2851 goto err_rdma_dest_wait;
2852 }
2853
2854 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2855 if (ret) {
733252de 2856 error_report("rdma_accept get_cm_event failed %d", ret);
2857 goto err_rdma_dest_wait;
2858 }
2859
2860 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
733252de 2861 error_report("rdma_accept: expected RDMA_CM_EVENT_ESTABLISHED");
2862 rdma_ack_cm_event(cm_event);
2863 goto err_rdma_dest_wait;
2864 }
2865
2866 rdma_ack_cm_event(cm_event);
5a91337c 2867 rdma->connected = true;
2da776db 2868
87772639 2869 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
2da776db 2870 if (ret) {
733252de 2871 error_report("rdma migration: error posting second control recv");
2872 goto err_rdma_dest_wait;
2873 }
2874
2875 qemu_rdma_dump_gid("dest_connect", rdma->cm_id);
2876
2877 return 0;
2878
2879err_rdma_dest_wait:
2880 rdma->error_state = ret;
2881 qemu_rdma_cleanup(rdma);
2882 return ret;
2883}
2884
2885/*
2886 * During each iteration of the migration, we listen for instructions
2887 * by the source VM to perform dynamic page registrations before they
2888 * can perform RDMA operations.
2889 *
2890 * We respond with the 'rkey'.
2891 *
2892 * Keep doing this until the source tells us to stop.
2893 */
2894static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque,
2895 uint64_t flags)
2896{
2897 RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
2898 .type = RDMA_CONTROL_REGISTER_RESULT,
2899 .repeat = 0,
2900 };
2901 RDMAControlHeader unreg_resp = { .len = 0,
2902 .type = RDMA_CONTROL_UNREGISTER_FINISHED,
2903 .repeat = 0,
2904 };
2905 RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
2906 .repeat = 1 };
2907 QEMUFileRDMA *rfile = opaque;
2908 RDMAContext *rdma = rfile->rdma;
2909 RDMALocalBlocks *local = &rdma->local_ram_blocks;
2910 RDMAControlHeader head;
2911 RDMARegister *reg, *registers;
2912 RDMACompress *comp;
2913 RDMARegisterResult *reg_result;
2914 static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
2915 RDMALocalBlock *block;
2916 void *host_addr;
2917 int ret = 0;
2918 int idx = 0;
2919 int count = 0;
2920 int i = 0;
2921
2922 CHECK_ERROR_STATE();
2923
2924 do {
733252de 2925 trace_qemu_rdma_registration_handle_wait(flags);
2926
2927 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
2928
2929 if (ret < 0) {
2930 break;
2931 }
2932
2933 if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
2934 error_report("rdma: Too many requests in this message (%d). "
2935 "Bailing.", head.repeat);
2936 ret = -EIO;
2937 break;
2938 }
2939
2940 switch (head.type) {
2941 case RDMA_CONTROL_COMPRESS:
2942 comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
2943 network_to_compress(comp);
2944
2945 trace_qemu_rdma_registration_handle_compress(comp->length,
2946 comp->block_idx,
2947 comp->offset);
2948 block = &(rdma->local_ram_blocks.block[comp->block_idx]);
2949
2950 host_addr = block->local_host_addr +
2951 (comp->offset - block->offset);
2952
2953 ram_handle_compressed(host_addr, comp->value, comp->length);
2954 break;
2955
2956 case RDMA_CONTROL_REGISTER_FINISHED:
733252de 2957 trace_qemu_rdma_registration_handle_finished();
2958 goto out;
2959
2960 case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
733252de 2961 trace_qemu_rdma_registration_handle_ram_blocks();
2962
2963 if (rdma->pin_all) {
2964 ret = qemu_rdma_reg_whole_ram_blocks(rdma);
2965 if (ret) {
2966 error_report("rdma migration: error dest "
2967 "registering ram blocks");
2968 goto out;
2969 }
2970 }
2971
2972 /*
2973 * Dest uses this to prepare to transmit the RAMBlock descriptions
2974 * to the source VM after connection setup.
2975 * Both sides use the "remote" structure to communicate and update
2976 * their "local" descriptions with what was sent.
2977 */
2978 for (i = 0; i < local->nb_blocks; i++) {
2979 rdma->block[i].remote_host_addr =
2980 (uint64_t)(local->block[i].local_host_addr);
2981
2982 if (rdma->pin_all) {
2983 rdma->block[i].remote_rkey = local->block[i].mr->rkey;
2984 }
2985
2986 rdma->block[i].offset = local->block[i].offset;
2987 rdma->block[i].length = local->block[i].length;
2988
2989 remote_block_to_network(&rdma->block[i]);
2990 }
2991
2992 blocks.len = rdma->local_ram_blocks.nb_blocks
2993 * sizeof(RDMARemoteBlock);
2994
2995
2996 ret = qemu_rdma_post_send_control(rdma,
2997 (uint8_t *) rdma->block, &blocks);
2998
2999 if (ret < 0) {
733252de 3000 error_report("rdma migration: error sending remote info");
3001 goto out;
3002 }
3003
3004 break;
3005 case RDMA_CONTROL_REGISTER_REQUEST:
733252de 3006 trace_qemu_rdma_registration_handle_register(head.repeat);
3007
3008 reg_resp.repeat = head.repeat;
3009 registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
3010
3011 for (count = 0; count < head.repeat; count++) {
3012 uint64_t chunk;
3013 uint8_t *chunk_start, *chunk_end;
3014
3015 reg = &registers[count];
3016 network_to_register(reg);
3017
3018 reg_result = &results[count];
3019
733252de 3020 trace_qemu_rdma_registration_handle_register_loop(count,
3021 reg->current_index, reg->key.current_addr, reg->chunks);
3022
3023 block = &(rdma->local_ram_blocks.block[reg->current_index]);
3024 if (block->is_ram_block) {
3025 host_addr = (block->local_host_addr +
3026 (reg->key.current_addr - block->offset));
3027 chunk = ram_chunk_index(block->local_host_addr,
3028 (uint8_t *) host_addr);
3029 } else {
3030 chunk = reg->key.chunk;
3031 host_addr = block->local_host_addr +
3032 (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
3033 }
3034 chunk_start = ram_chunk_start(block, chunk);
3035 chunk_end = ram_chunk_end(block, chunk + reg->chunks);
3036 if (qemu_rdma_register_and_get_keys(rdma, block,
3037 (uint8_t *)host_addr, NULL, &reg_result->rkey,
3038 chunk, chunk_start, chunk_end)) {
733252de 3039 error_report("cannot get rkey");
3040 ret = -EINVAL;
3041 goto out;
3042 }
3043
3044 reg_result->host_addr = (uint64_t) block->local_host_addr;
3045
3046 trace_qemu_rdma_registration_handle_register_rkey(
3047 reg_result->rkey);
3048
3049 result_to_network(reg_result);
3050 }
3051
3052 ret = qemu_rdma_post_send_control(rdma,
3053 (uint8_t *) results, &reg_resp);
3054
3055 if (ret < 0) {
733252de 3056 error_report("Failed to send control buffer");
3057 goto out;
3058 }
3059 break;
3060 case RDMA_CONTROL_UNREGISTER_REQUEST:
733252de 3061 trace_qemu_rdma_registration_handle_unregister(head.repeat);
3062 unreg_resp.repeat = head.repeat;
3063 registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
3064
3065 for (count = 0; count < head.repeat; count++) {
3066 reg = &registers[count];
3067 network_to_register(reg);
3068
3069 trace_qemu_rdma_registration_handle_unregister_loop(count,
3070 reg->current_index, reg->key.chunk);
3071
3072 block = &(rdma->local_ram_blocks.block[reg->current_index]);
3073
3074 ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
3075 block->pmr[reg->key.chunk] = NULL;
3076
3077 if (ret != 0) {
3078 perror("rdma unregistration chunk failed");
3079 ret = -ret;
3080 goto out;
3081 }
3082
3083 rdma->total_registrations--;
3084
3085 trace_qemu_rdma_registration_handle_unregister_success(
3086 reg->key.chunk);
3087 }
3088
3089 ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);
3090
3091 if (ret < 0) {
733252de 3092 error_report("Failed to send control buffer");
3093 goto out;
3094 }
3095 break;
3096 case RDMA_CONTROL_REGISTER_RESULT:
733252de 3097 error_report("Invalid RESULT message at dest.");
3098 ret = -EIO;
3099 goto out;
3100 default:
733252de 3101 error_report("Unknown control message %s", control_desc[head.type]);
3102 ret = -EIO;
3103 goto out;
3104 }
3105 } while (1);
3106out:
3107 if (ret < 0) {
3108 rdma->error_state = ret;
3109 }
3110 return ret;
3111}
3112
3113static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
3114 uint64_t flags)
3115{
3116 QEMUFileRDMA *rfile = opaque;
3117 RDMAContext *rdma = rfile->rdma;
3118
3119 CHECK_ERROR_STATE();
3120
733252de 3121 trace_qemu_rdma_registration_start(flags);
3122 qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
3123 qemu_fflush(f);
3124
3125 return 0;
3126}
3127
3128/*
3129 * Inform dest that dynamic registrations are done for now.
3130 * First, flush writes, if any.
3131 */
3132static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
3133 uint64_t flags)
3134{
3135 Error *local_err = NULL, **errp = &local_err;
3136 QEMUFileRDMA *rfile = opaque;
3137 RDMAContext *rdma = rfile->rdma;
3138 RDMAControlHeader head = { .len = 0, .repeat = 1 };
3139 int ret = 0;
3140
3141 CHECK_ERROR_STATE();
3142
3143 qemu_fflush(f);
3144 ret = qemu_rdma_drain_cq(f, rdma);
3145
3146 if (ret < 0) {
3147 goto err;
3148 }
3149
3150 if (flags == RAM_CONTROL_SETUP) {
3151 RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
3152 RDMALocalBlocks *local = &rdma->local_ram_blocks;
3153 int reg_result_idx, i, j, nb_remote_blocks;
3154
3155 head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
733252de 3156 trace_qemu_rdma_registration_stop_ram();
3157
3158 /*
3159 * Make sure that we parallelize the pinning on both sides.
3160 * For very large guests, doing this serially takes a really
3161 * long time, so we have to 'interleave' the pinning locally
3162 * with the control messages by performing the pinning on this
3163 * side before we receive the control response from the other
3164 * side that the pinning has completed.
3165 */
3166 ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
3167 &reg_result_idx, rdma->pin_all ?
3168 qemu_rdma_reg_whole_ram_blocks : NULL);
3169 if (ret < 0) {
66988941 3170 ERROR(errp, "receiving remote info!");
3171 return ret;
3172 }
3173
3174 nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock);
3175
3176 /*
3177 * The protocol uses two different sets of rkeys (mutually exclusive):
3178 * 1. One key to represent the virtual address of the entire ram block.
3179 * (dynamic chunk registration disabled - pin everything with one rkey.)
3180 * 2. One to represent individual chunks within a ram block.
3181 * (dynamic chunk registration enabled - pin individual chunks.)
3182 *
3183 * Once the capability is successfully negotiated, the destination transmits
3184 * the keys to use (or sends them later) including the virtual addresses
3185 * and then propagates the remote ram block descriptions to its local copy.
3186 */
3187
3188 if (local->nb_blocks != nb_remote_blocks) {
3189 ERROR(errp, "ram blocks mismatch #1! "
3190 "Your QEMU command line parameters are probably "
66988941 3191 "not identical on both the source and destination.");
3192 return -EINVAL;
3193 }
3194
3195 qemu_rdma_move_header(rdma, reg_result_idx, &resp);
3196 memcpy(rdma->block,
3197 rdma->wr_data[reg_result_idx].control_curr, resp.len);
3198 for (i = 0; i < nb_remote_blocks; i++) {
3199 network_to_remote_block(&rdma->block[i]);
3200
3201 /* search local ram blocks */
3202 for (j = 0; j < local->nb_blocks; j++) {
3203 if (rdma->block[i].offset != local->block[j].offset) {
3204 continue;
3205 }
3206
3207 if (rdma->block[i].length != local->block[j].length) {
3208 ERROR(errp, "ram blocks mismatch #2! "
3209 "Your QEMU command line parameters are probably "
66988941 3210 "not identical on both the source and destination.");
3211 return -EINVAL;
3212 }
3213 local->block[j].remote_host_addr =
3214 rdma->block[i].remote_host_addr;
3215 local->block[j].remote_rkey = rdma->block[i].remote_rkey;
3216 break;
3217 }
3218
3219 if (j >= local->nb_blocks) {
3220 ERROR(errp, "ram blocks mismatch #3! "
3221 "Your QEMU command line parameters are probably "
66988941 3222 "not identical on both the source and destination.");
3223 return -EINVAL;
3224 }
3225 }
3226 }
3227
733252de 3228 trace_qemu_rdma_registration_stop(flags);
3229
3230 head.type = RDMA_CONTROL_REGISTER_FINISHED;
3231 ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
3232
3233 if (ret < 0) {
3234 goto err;
3235 }
3236
3237 return 0;
3238err:
3239 rdma->error_state = ret;
3240 return ret;
3241}
3242
3243static int qemu_rdma_get_fd(void *opaque)
3244{
3245 QEMUFileRDMA *rfile = opaque;
3246 RDMAContext *rdma = rfile->rdma;
3247
3248 return rdma->comp_channel->fd;
3249}
3250
2ae31aea 3251static const QEMUFileOps rdma_read_ops = {
3252 .get_buffer = qemu_rdma_get_buffer,
3253 .get_fd = qemu_rdma_get_fd,
3254 .close = qemu_rdma_close,
3255 .hook_ram_load = qemu_rdma_registration_handle,
3256};
3257
2ae31aea 3258static const QEMUFileOps rdma_write_ops = {
3259 .put_buffer = qemu_rdma_put_buffer,
3260 .close = qemu_rdma_close,
3261 .before_ram_iterate = qemu_rdma_registration_start,
3262 .after_ram_iterate = qemu_rdma_registration_stop,
3263 .save_page = qemu_rdma_save_page,
3264};
3265
3266static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
3267{
3268 QEMUFileRDMA *r = g_malloc0(sizeof(QEMUFileRDMA));
3269
3270 if (qemu_file_mode_is_not_valid(mode)) {
3271 return NULL;
3272 }
3273
3274 r->rdma = rdma;
3275
3276 if (mode[0] == 'w') {
3277 r->file = qemu_fopen_ops(r, &rdma_write_ops);
3278 } else {
3279 r->file = qemu_fopen_ops(r, &rdma_read_ops);
3280 }
3281
3282 return r->file;
3283}
3284
3285static void rdma_accept_incoming_migration(void *opaque)
3286{
3287 RDMAContext *rdma = opaque;
3288 int ret;
3289 QEMUFile *f;
3290 Error *local_err = NULL, **errp = &local_err;
3291
733252de 3292 trace_qemu_dma_accept_incoming_migration();
3293 ret = qemu_rdma_accept(rdma);
3294
3295 if (ret) {
66988941 3296 ERROR(errp, "RDMA Migration initialization failed!");
3297 return;
3298 }
3299
733252de 3300 trace_qemu_dma_accept_incoming_migration_accepted();
3301
3302 f = qemu_fopen_rdma(rdma, "rb");
3303 if (f == NULL) {
66988941 3304 ERROR(errp, "could not qemu_fopen_rdma!");
3305 qemu_rdma_cleanup(rdma);
3306 return;
3307 }
3308
3309 rdma->migration_started_on_destination = 1;
3310 process_incoming_migration(f);
3311}
3312
3313void rdma_start_incoming_migration(const char *host_port, Error **errp)
3314{
3315 int ret;
3316 RDMAContext *rdma;
3317 Error *local_err = NULL;
3318
733252de 3319 trace_rdma_start_incoming_migration();
3320 rdma = qemu_rdma_data_init(host_port, &local_err);
3321
3322 if (rdma == NULL) {
3323 goto err;
3324 }
3325
3326 ret = qemu_rdma_dest_init(rdma, &local_err);
3327
3328 if (ret) {
3329 goto err;
3330 }
3331
733252de 3332 trace_rdma_start_incoming_migration_after_dest_init();
3333
3334 ret = rdma_listen(rdma->listen_id, 5);
3335
3336 if (ret) {
66988941 3337 ERROR(errp, "listening on socket!");
3338 goto err;
3339 }
3340
733252de 3341 trace_rdma_start_incoming_migration_after_rdma_listen();
3342
3343 qemu_set_fd_handler2(rdma->channel->fd, NULL,
3344 rdma_accept_incoming_migration, NULL,
3345 (void *)(intptr_t) rdma);
3346 return;
3347err:
3348 error_propagate(errp, local_err);
3349 g_free(rdma);
3350}
3351
3352void rdma_start_outgoing_migration(void *opaque,
3353 const char *host_port, Error **errp)
3354{
3355 MigrationState *s = opaque;
3356 Error *local_err = NULL, **temp = &local_err;
3357 RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
3358 int ret = 0;
3359
3360 if (rdma == NULL) {
66988941 3361 ERROR(temp, "Failed to initialize RDMA data structures! %d", ret);
3362 goto err;
3363 }
3364
3365 ret = qemu_rdma_source_init(rdma, &local_err,
41310c68 3366 s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);
3367
3368 if (ret) {
3369 goto err;
3370 }
3371
733252de 3372 trace_rdma_start_outgoing_migration_after_rdma_source_init();
3373 ret = qemu_rdma_connect(rdma, &local_err);
3374
3375 if (ret) {
3376 goto err;
3377 }
3378
733252de 3379 trace_rdma_start_outgoing_migration_after_rdma_connect();
3380
3381 s->file = qemu_fopen_rdma(rdma, "wb");
3382 migrate_fd_connect(s);
3383 return;
3384err:
3385 error_propagate(errp, local_err);
3386 g_free(rdma);
3387 migrate_fd_error(s);
3388}