/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 * Copyright Red Hat, Inc. 2015-2016
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *  Daniel P. Berrange <berrange@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "rdma.h"
#include "migration.h"
#include "qemu-file.h"
#include "ram.h"
#include "qemu-file-channel.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/rcu.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "qemu/coroutine.h"
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <rdma/rdma_cma.h>
#include "trace.h"

/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)

#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */

/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size infiniband SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;

#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                error_report("RDMA is in an error state waiting for" \
                             " migration to abort!"); \
                rdma->error_reported = 1; \
            } \
            rcu_read_unlock(); \
            return rdma->error_state; \
        } \
    } while (0)
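
/*
 * Editor's illustration (hypothetical usage, not from the original source):
 * CHECK_ERROR_STATE() is intended as the first statement of functions that
 * have a local "RDMAContext *rdma" in scope and have already taken
 * rcu_read_lock() (the macro drops that lock on the early-return path):
 *
 *     static ssize_t example_io_handler(...)
 *     {
 *         RDMAContext *rdma = ...;
 *         CHECK_ERROR_STATE();
 *         ... no verbs calls are attempted once the context is broken ...
 *     }
 */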

/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL

#define RDMA_WRID_TYPE_MASK \
    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)

#define RDMA_WRID_BLOCK_MASK \
    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))

#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
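
/*
 * Worked example (editor's illustration, not part of the original source):
 * a write to chunk 5 of ram block 2 is tagged with
 *
 *     wr_id = RDMA_WRID_RDMA_WRITE
 *           | (2UL << RDMA_WRID_BLOCK_SHIFT)
 *           | (5UL << RDMA_WRID_CHUNK_SHIFT)
 *           = 0x0000000140020001
 *
 * and decomposed on completion with
 *
 *     type  = wr_id & RDMA_WRID_TYPE_MASK;                             // 1
 *     block = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT; // 2
 *     chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT; // 5
 */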

/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};

static const char *wrid_desc[] = {
    [RDMA_WRID_NONE] = "NONE",
    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
};

/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands).
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};
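
/*
 * Editor's note (derived from usage later in this file): these values
 * index rdma->wr_data[] and are also folded into receive work request
 * IDs, i.e. a RECV posted on slot idx completes with
 * wr_id == RDMA_WRID_RECV_CONTROL + idx (see
 * qemu_rdma_post_recv_control() and qemu_rdma_exchange_get_response()).
 */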

/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};

/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;

/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
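
/*
 * Editor's sketch (hypothetical usage, mirroring the negotiation described
 * above): the source sends its RDMACapabilities in network byte order at
 * connect time, and the receiver masks them against what it knows:
 *
 *     RDMACapabilities cap = { .version = RDMA_CONTROL_VERSION_CURRENT,
 *                              .flags = RDMA_CAPABILITY_PIN_ALL };
 *     caps_to_network(&cap);    // before handing the bytes to the CM
 *     ...
 *     network_to_caps(&cap);    // on receipt
 *     if (cap.flags & ~known_capabilities) {
 *         // reject: peer requested a capability we do not implement
 *     }
 */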

/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    char          *block_name;
    uint8_t       *local_host_addr;  /* local virtual address */
    uint64_t       remote_host_addr; /* remote virtual address */
    uint64_t       offset;
    uint64_t       length;
    struct         ibv_mr **pmr;     /* MRs for chunk-level registration */
    struct         ibv_mr *mr;       /* MR for non-chunk-level registration */
    uint32_t      *remote_keys;      /* rkeys for chunk-level registration */
    uint32_t       remote_rkey;      /* rkey for non-chunk-level registration */
    int            index;            /* which block are we */
    unsigned int   src_index;        /* (Only used on dest) */
    bool           is_ram_block;
    int            nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;

/*
 * Also represents a RAMblock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMADestBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMADestBlock;

static const char *control_desc(unsigned int rdma_control)
{
    static const char *strs[] = {
        [RDMA_CONTROL_NONE] = "NONE",
        [RDMA_CONTROL_ERROR] = "ERROR",
        [RDMA_CONTROL_READY] = "READY",
        [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
        [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
        [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
        [RDMA_CONTROL_COMPRESS] = "COMPRESS",
        [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
        [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
        [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
        [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
        [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
    };

    if (rdma_control > RDMA_CONTROL_UNREGISTER_FINISHED) {
        return "??BAD CONTROL VALUE??";
    }

    return strs[rdma_control];
}

static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
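
/*
 * Editor's illustration (not in the original source): on a little-endian
 * host, htonll(0x0102030405060708ULL) stores the bytes
 * 01 02 03 04 05 06 07 08 in memory, i.e. most-significant byte first,
 * and ntohll() of that stored value recovers 0x0102030405060708ULL.
 * The union lets the 64-bit value be byte-swapped as two 32-bit halves
 * using only the portable htonl()/ntohl() primitives.
 */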

static void dest_block_to_network(RDMADestBlock *db)
{
    db->remote_host_addr = htonll(db->remote_host_addr);
    db->offset = htonll(db->offset);
    db->length = htonll(db->length);
    db->remote_rkey = htonl(db->remote_rkey);
}

static void network_to_dest_block(RDMADestBlock *db)
{
    db->remote_host_addr = ntohll(db->remote_host_addr);
    db->offset = ntohll(db->offset);
    db->length = ntohll(db->length);
    db->remote_rkey = ntohl(db->remote_rkey);
}

/*
 * Local container for the above per-block structures, used while
 * transmitting the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool     init;             /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;

/*
 * Main data structure for RDMA state.
 * While there is only one copy of this structure being allocated right now,
 * this is the place where one would start if one wanted to consider
 * having more than one RDMA connection open at the same time.
 */
typedef struct RDMAContext {
    char *host;
    int port;

    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];

    /*
     * This is used by *_exchange_send() to figure out whether or not
     * the initial "READY" message has already been received or not.
     * This is because other functions may potentially poll() and detect
     * the READY message before send() does, in which case we need to
     * know if it completed.
     */
    int control_ready_expected;

    /* number of outstanding writes */
    int nb_sent;

    /* store info about current buffer so that we can
       merge it with future sends */
    uint64_t current_addr;
    uint64_t current_length;
    /* index of ram block the current buffer belongs to */
    int current_index;
    /* index of the chunk in the current ram block */
    int current_chunk;

    bool pin_all;

    /*
     * infiniband-specific variables for opening the device
     * and maintaining connection state and so forth.
     *
     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
     * cm_id->verbs, cm_id->channel, and cm_id->qp.
     */
    struct rdma_cm_id *cm_id;               /* connection manager ID */
    struct rdma_cm_id *listen_id;
    bool connected;

    struct ibv_context          *verbs;
    struct rdma_event_channel   *channel;
    struct ibv_qp *qp;                      /* queue pair */
    struct ibv_comp_channel *comp_channel;  /* completion channel */
    struct ibv_pd *pd;                      /* protection domain */
    struct ibv_cq *cq;                      /* completion queue */

    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    int error_reported;
    int received_error;

    /*
     * Description of ram blocks used throughout the code.
     */
    RDMALocalBlocks local_ram_blocks;
    RDMADestBlock  *dest_blocks;

    /* Index of the next RAMBlock received during block registration */
    unsigned int    next_src_index;

    /*
     * Migration on *destination* started.
     * Then use coroutine yield function.
     * Source runs in a thread, so we don't care.
     */
    int migration_started_on_destination;

    int total_registrations;
    int total_writes;

    int unregister_current, unregister_next;
    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];

    GHashTable *blockmap;

    /* the RDMAContext for return path */
    struct RDMAContext *return_path;
    bool is_return_path;
} RDMAContext;

#define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
#define QIO_CHANNEL_RDMA(obj)                                     \
    OBJECT_CHECK(QIOChannelRDMA, (obj), TYPE_QIO_CHANNEL_RDMA)

typedef struct QIOChannelRDMA QIOChannelRDMA;


struct QIOChannelRDMA {
    QIOChannel parent;
    RDMAContext *rdmain;
    RDMAContext *rdmaout;
    QEMUFile *file;
    bool blocking; /* XXX we don't actually honour this yet */
};

/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;

static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
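
/*
 * Editor's illustration (derived from the struct above and the send path
 * later in this file): every control message on the wire is a 16-byte
 * header followed by 'len' bytes of payload, all in network byte order:
 *
 *     byte 0        4        8        12       16
 *          +--------+--------+--------+--------+----------------+
 *          |  len   |  type  | repeat | padding|  data[len] ... |
 *          +--------+--------+--------+--------+----------------+
 *
 * so e.g. a RDMA_CONTROL_READY message with no payload occupies exactly
 * sizeof(RDMAControlHeader) == 16 bytes of the registered control buffer.
 */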

/*
 * Register a single Chunk.
 * Information sent by the source VM to inform the dest
 * to register a single chunk of memory before we can perform
 * the actual RDMA operation.
 */
typedef struct QEMU_PACKED {
    union QEMU_PACKED {
        uint64_t current_addr;  /* offset into the ram_addr_t space */
        uint64_t chunk;         /* chunk to lookup if unregistering */
    } key;
    uint32_t current_index; /* which ramblock the chunk belongs to */
    uint32_t padding;
    uint64_t chunks;        /* how many sequential chunks to register */
} RDMARegister;

static void register_to_network(RDMAContext *rdma, RDMARegister *reg)
{
    RDMALocalBlock *local_block;
    local_block  = &rdma->local_ram_blocks.block[reg->current_index];

    if (local_block->is_ram_block) {
        /*
         * current_addr as passed in is an address in the local ram_addr_t
         * space, we need to translate this for the destination
         */
        reg->key.current_addr -= local_block->offset;
        reg->key.current_addr += rdma->dest_blocks[reg->current_index].offset;
    }
    reg->key.current_addr = htonll(reg->key.current_addr);
    reg->current_index = htonl(reg->current_index);
    reg->chunks = htonll(reg->chunks);
}

static void network_to_register(RDMARegister *reg)
{
    reg->key.current_addr = ntohll(reg->key.current_addr);
    reg->current_index = ntohl(reg->current_index);
    reg->chunks = ntohll(reg->chunks);
}

typedef struct QEMU_PACKED {
    uint32_t value;     /* if zero, we will madvise() */
    uint32_t block_idx; /* which ram block index */
    uint64_t offset;    /* Address in remote ram_addr_t space */
    uint64_t length;    /* length of the chunk */
} RDMACompress;

static void compress_to_network(RDMAContext *rdma, RDMACompress *comp)
{
    comp->value = htonl(comp->value);
    /*
     * comp->offset as passed in is an address in the local ram_addr_t
     * space, we need to translate this for the destination
     */
    comp->offset -= rdma->local_ram_blocks.block[comp->block_idx].offset;
    comp->offset += rdma->dest_blocks[comp->block_idx].offset;
    comp->block_idx = htonl(comp->block_idx);
    comp->offset = htonll(comp->offset);
    comp->length = htonll(comp->length);
}

static void network_to_compress(RDMACompress *comp)
{
    comp->value = ntohl(comp->value);
    comp->block_idx = ntohl(comp->block_idx);
    comp->offset = ntohll(comp->offset);
    comp->length = ntohll(comp->length);
}

/*
 * The result of the dest's memory registration produces an "rkey"
 * which the source VM must reference in order to perform
 * the RDMA operation.
 */
typedef struct QEMU_PACKED {
    uint32_t rkey;
    uint32_t padding;
    uint64_t host_addr;
} RDMARegisterResult;

static void result_to_network(RDMARegisterResult *result)
{
    result->rkey = htonl(result->rkey);
    result->host_addr = htonll(result->host_addr);
}

static void network_to_result(RDMARegisterResult *result)
{
    result->rkey = ntohl(result->rkey);
    result->host_addr = ntohll(result->host_addr);
}

const char *print_wrid(int wrid);
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma));

static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}

static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr +
                                  (i << RDMA_REG_CHUNK_SHIFT));
}

static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}
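
/*
 * Worked example (editor's addition): with RDMA_REG_CHUNK_SHIFT == 20,
 * chunks are 1 MB. For a block whose local_host_addr is B:
 *
 *     ram_chunk_index(B, B + 0x250000) == 2   // byte 0x250000 is in chunk 2
 *     ram_chunk_start(block, 2)        == B + 0x200000
 *     ram_chunk_end(block, 2)          == B + 0x300000, clamped to
 *                                         B + block->length for the tail chunk
 */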

static int rdma_add_block(RDMAContext *rdma, const char *block_name,
                          void *host_addr,
                          ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block;
    RDMALocalBlock *old = local->block;

    local->block = g_new0(RDMALocalBlock, local->nb_blocks + 1);

    if (local->nb_blocks) {
        int x;

        if (rdma->blockmap) {
            for (x = 0; x < local->nb_blocks; x++) {
                g_hash_table_remove(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset);
                g_hash_table_insert(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset,
                                    &local->block[x]);
            }
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->block_name = g_strdup(block_name);
    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->src_index = ~0U; /* Filled in by the receipt of the block list */
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_new0(uint32_t, block->nb_chunks);

    block->is_ram_block = local->init ? false : true;

    if (rdma->blockmap) {
        g_hash_table_insert(rdma->blockmap, (void *)(uintptr_t)block_offset, block);
    }

    trace_rdma_add_block(block_name, local->nb_blocks,
                         (uintptr_t) block->local_host_addr,
                         block->offset, block->length,
                         (uintptr_t) (block->local_host_addr + block->length),
                         BITS_TO_LONGS(block->nb_chunks) *
                             sizeof(unsigned long) * 8,
                         block->nb_chunks);

    local->nb_blocks++;

    return 0;
}

/*
 * Memory regions need to be registered with the device and queue pairs set
 * up in advance before the migration starts. This tells us where the RAM
 * blocks are so that we can register them individually.
 */
static int qemu_rdma_init_one_block(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t block_offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    return rdma_add_block(opaque, block_name, host_addr, block_offset, length);
}

/*
 * Identify the RAMBlocks and their quantity. They will be referenced to
 * identify chunk boundaries inside each RAMBlock and also be referenced
 * during dynamic page registration.
 */
static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    int ret;

    assert(rdma->blockmap == NULL);
    memset(local, 0, sizeof *local);
    ret = foreach_not_ignored_block(qemu_rdma_init_one_block, rdma);
    if (ret) {
        return ret;
    }
    trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
    rdma->dest_blocks = g_new0(RDMADestBlock,
                               rdma->local_ram_blocks.nb_blocks);
    local->init = true;
    return 0;
}

/*
 * Note: If used outside of cleanup, the caller must ensure that the
 * destination block structures are also updated.
 */
static int rdma_delete_block(RDMAContext *rdma, RDMALocalBlock *block)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *old = local->block;
    int x;

    if (rdma->blockmap) {
        g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)block->offset);
    }
    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    g_free(block->block_name);
    block->block_name = NULL;

    if (rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap,
                                (void *)(uintptr_t)old[x].offset);
        }
    }

    if (local->nb_blocks > 1) {

        local->block = g_new0(RDMALocalBlock, local->nb_blocks - 1);

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                   sizeof(RDMALocalBlock) *
                       (local->nb_blocks - (block->index + 1)));
            for (x = block->index; x < local->nb_blocks - 1; x++) {
                local->block[x].index--;
            }
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    trace_rdma_delete_block(block, (uintptr_t)block->local_host_addr,
                            block->offset, block->length,
                            (uintptr_t)(block->local_host_addr + block->length),
                            BITS_TO_LONGS(block->nb_chunks) *
                                sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks && rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap,
                                (void *)(uintptr_t)local->block[x].offset,
                                &local->block[x]);
        }
    }

    return 0;
}

/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        error_report("Failed to query port information");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}

/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    trace_qemu_rdma_dump_gid(who, sgid, dgid);
}

/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If the user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 *  If the source VM connects with an IPv4 address without knowing that the
 *  destination has bound to '[::]' the migration will unconditionally fail
 *  unless the management software is explicitly listening on the IPv4
 *  address while using a RoCE-based device.
 *
 *  If the source VM connects with an IPv6 address, then we're OK because we can
 *  throw an error on the source (and similarly on the destination).
 *
 *  But in mixed environments, this will be broken for a while until it is fixed
 *  inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
{
    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX
    struct ibv_port_attr port_attr;

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there are any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);
            if (!verbs) {
                if (errno == EPERM) {
                    continue;
                } else {
                    return -EINVAL;
                }
            }

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);

        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your system"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that something other than
     * '[::]' was used by the management software for binding. In which
     * case we can actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}

/*
 * Figure out which RDMA device corresponds to the requested IP hostname.
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, errp);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}

/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        error_report("failed to allocate completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        error_report("failed to allocate completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;

}

/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->cq;
    attr.recv_cq = rdma->cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}

static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                    local->block[i].local_host_addr,
                    local->block[i].length,
                    IBV_ACCESS_LOCAL_WRITE |
                    IBV_ACCESS_REMOTE_WRITE
                    );
        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        rdma->total_registrations--;
    }

    return -1;

}

/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block that the page belongs to.
 *
 * This search cannot fail or the migration will fail.
 */
static int qemu_rdma_search_ram_block(RDMAContext *rdma,
                                      uintptr_t block_offset,
                                      uint64_t offset,
                                      uint64_t length,
                                      uint64_t *block_index,
                                      uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));

    return 0;
}

/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
        RDMALocalBlock *block, uintptr_t host_addr,
        uint32_t *lkey, uint32_t *rkey, int chunk,
        uint8_t *chunk_start, uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_new0(struct ibv_mr *, block->nb_chunks);
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;

        trace_qemu_rdma_register_and_get_keys(len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
                chunk_start, len,
                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE) : 0));

        if (!block->pmr[chunk]) {
            perror("Failed to register chunk!");
            fprintf(stderr, "Chunk details: block: %d chunk index %d"
                            " start %" PRIuPTR " end %" PRIuPTR
                            " host %" PRIuPTR
                            " local %" PRIuPTR " registrations: %d\n",
                            block->index, chunk, (uintptr_t)chunk_start,
                            (uintptr_t)chunk_end, host_addr,
                            (uintptr_t)block->local_host_addr,
                            rdma->total_registrations);
            return -1;
        }
        rdma->total_registrations++;
    }

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}

/*
 * Register (at connection time) the memory used for control
 * channel messages.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    error_report("qemu_rdma_reg_control failed");
    return -1;
}

const char *print_wrid(int wrid)
{
    if (wrid >= RDMA_WRID_RECV_CONTROL) {
        return wrid_desc[RDMA_WRID_RECV_CONTROL];
    }
    return wrid_desc[wrid];
}

/*
 * RDMA requires memory registration (mlock/pinning), but this is not good for
 * overcommitment.
 *
 * In preparation for the future where LRU information or workload-specific
 * writable working set memory access behavior is available to QEMU
 * it would be nice to have in place the ability to UN-register/UN-pin
 * particular memory regions from the RDMA hardware when it is determined that
 * those regions of memory will likely not be accessed again in the near
 * future.
 *
 * While we do not yet have such information right now, the following
 * compile-time option allows us to perform a non-optimized version of this
 * behavior.
 *
 * By uncommenting this option, you will cause *all* RDMA transfers to be
 * unregistered immediately after the transfer completes on both sides of the
 * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
 *
 * This will have a terrible impact on migration performance, so until future
 * workload information or LRU information is available, do not attempt to use
 * this feature except for basic testing.
 */
//#define RDMA_UNREGISTRATION_EXAMPLE

/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
        - for bit clearing
        - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        trace_qemu_rdma_unregister_waiting_proc(chunk,
                                                rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }


        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's infiniband message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */
        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            trace_qemu_rdma_unregister_waiting_inflight(chunk);
            continue;
        }

        trace_qemu_rdma_unregister_waiting_send(chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(rdma, &reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        trace_qemu_rdma_unregister_waiting_complete(chunk);
    }

    return 0;
}

static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}
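
/*
 * Editor's note: this is the inverse of the mask-and-shift decomposition
 * used in qemu_rdma_poll() and qemu_rdma_unregister_waiting(); e.g.
 * qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, 2, 5) round-trips back to
 * type 1, block index 2, chunk 5 when split with the RDMA_WRID_*_MASK
 * macros defined at the top of this file.
 */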

/*
 * Set bit for unregistration in the next iteration.
 * We cannot transmit right here, but will unpin later.
 */
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
                                        uint64_t chunk, uint64_t wr_id)
{
    if (rdma->unregistrations[rdma->unregister_next] != 0) {
        error_report("rdma migration: queue is full");
    } else {
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
            trace_qemu_rdma_signal_unregister_append(chunk,
                                                     rdma->unregister_next);

            rdma->unregistrations[rdma->unregister_next++] =
                    qemu_rdma_make_wrid(wr_id, index, chunk);

            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
                rdma->unregister_next = 0;
            }
        } else {
            trace_qemu_rdma_signal_unregister_already(chunk);
        }
    }
}

/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * The ID of the work request that completed is returned in *wr_id_out.
 */
static int qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
                          uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(rdma->cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        error_report("ibv_poll_cq returned %d", ret);
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL],
                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent,
                                   index, chunk, block->local_host_addr,
                                   (void *)(uintptr_t)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }

        if (!rdma->pin_all) {
            /*
             * FYI: If one wanted to signal a specific chunk to be unregistered
             * using LRU or workload-specific information, this is the function
             * you would call to do so. That chunk would then get asynchronously
             * unregistered later.
             */
#ifdef RDMA_UNREGISTRATION_EXAMPLE
            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
#endif
        }
    } else {
        trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}

/* Wait for activity on the completion channel.
 * Returns 0 on success, non-0 on error.
 */
static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
{
    struct rdma_cm_event *cm_event;
    int ret = -1;

    /*
     * Coroutine doesn't start until migration_fd_process_incoming()
     * so don't yield unless we know we're running inside of a coroutine.
     */
    if (rdma->migration_started_on_destination &&
        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
        yield_until_fd_readable(rdma->comp_channel->fd);
    } else {
        /* This is the source side; we're in a separate thread
         * or destination prior to migration_fd_process_incoming()
         * (after postcopy, the destination is also in a separate thread).
         * We can't yield, so we have to poll the fd.
         * But we need to be able to handle 'cancel' or an error
         * without hanging forever.
         */
        while (!rdma->error_state && !rdma->received_error) {
            GPollFD pfds[2];
            pfds[0].fd = rdma->comp_channel->fd;
            pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[0].revents = 0;

            pfds[1].fd = rdma->channel->fd;
            pfds[1].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[1].revents = 0;

            /* 0.1s timeout, should be fine for a 'cancel' */
            switch (qemu_poll_ns(pfds, 2, 100 * 1000 * 1000)) {
            case 2:
            case 1: /* fd active */
                if (pfds[0].revents) {
                    return 0;
                }

                if (pfds[1].revents) {
                    ret = rdma_get_cm_event(rdma->channel, &cm_event);
                    if (ret) {
                        error_report("failed to get cm event while waiting "
                                     "on the completion channel");
                        return -EPIPE;
                    }

                    error_report("received cm event while waiting on the "
                                 "completion channel, cm event is %d",
                                 cm_event->event);
                    if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
                        cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                        rdma_ack_cm_event(cm_event);
                        return -EPIPE;
                    }
                    rdma_ack_cm_event(cm_event);
                }
                break;

            case 0: /* Timeout, go around again */
                break;

            default: /* Error of some type -
                      * I don't trust errno from qemu_poll_ns
                      */
                error_report("%s: poll failed", __func__);
                return -EPIPE;
            }

            if (migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) {
                /* Bail out and let the cancellation happen */
                return -EPIPE;
            }
        }
    }

    if (rdma->received_error) {
        return -EPIPE;
    }
    return rdma->error_state;
}

/*
 * Block until the next work request has completed.
 *
 * First poll to see if a work request has already completed,
 * otherwise block.
 *
 * If we encounter completed work requests for IDs other than
 * the one we're interested in, then that's generally an error.
 *
 * The only exception is actual RDMA Write completions. These
 * completions only need to be recorded, but do not actually
 * need further processing.
 */
static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
                                    uint32_t *byte_len)
{
    int num_cq_events = 0, ret = 0;
    struct ibv_cq *cq;
    void *cq_ctx;
    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;

    if (ibv_req_notify_cq(rdma->cq, 0)) {
        return -1;
    }
    /* poll cq first */
    while (wr_id != wrid_requested) {
        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
        if (ret < 0) {
            return ret;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
        if (wr_id != wrid_requested) {
            trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                       wrid_requested, print_wrid(wr_id), wr_id);
        }
    }

    if (wr_id == wrid_requested) {
        return 0;
    }

    while (1) {
        ret = qemu_rdma_wait_comp_channel(rdma);
        if (ret) {
            goto err_block_for_wrid;
        }

        ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx);
        if (ret) {
            perror("ibv_get_cq_event");
            goto err_block_for_wrid;
        }

        num_cq_events++;

        ret = -ibv_req_notify_cq(cq, 0);
        if (ret) {
            goto err_block_for_wrid;
        }

        while (wr_id != wrid_requested) {
            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
            if (ret < 0) {
                goto err_block_for_wrid;
            }

            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

            if (wr_id == RDMA_WRID_NONE) {
                break;
            }
            if (wr_id != wrid_requested) {
                trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                                   wrid_requested, print_wrid(wr_id), wr_id);
            }
        }

        if (wr_id == wrid_requested) {
            goto success_block_for_wrid;
        }
    }

success_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return 0;

err_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }

    rdma->error_state = ret;
    return ret;
}

/*
 * Post a SEND message work request for the control channel
 * containing some data and block until the post completes.
 */
static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
                                       RDMAControlHeader *head)
{
    int ret = 0;
    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
    struct ibv_send_wr *bad_wr;
    struct ibv_sge sge = {
                           .addr = (uintptr_t)(wr->control),
                           .length = head->len + sizeof(RDMAControlHeader),
                           .lkey = wr->control_mr->lkey,
                         };
    struct ibv_send_wr send_wr = {
                                   .wr_id = RDMA_WRID_SEND_CONTROL,
                                   .opcode = IBV_WR_SEND,
                                   .send_flags = IBV_SEND_SIGNALED,
                                   .sg_list = &sge,
                                   .num_sge = 1,
                                };

    trace_qemu_rdma_post_send_control(control_desc(head->type));

    /*
     * We don't actually need to do a memcpy() in here if we used
     * the "sge" properly, but since we're only sending control messages
     * (not RAM in a performance-critical path), then it's OK for now.
     *
     * The copy makes the RDMAControlHeader simpler to manipulate
     * for the time being.
     */
    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
    memcpy(wr->control, head, sizeof(RDMAControlHeader));
    control_to_network((void *) wr->control);

    if (buf) {
        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
    }


    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret > 0) {
        error_report("Failed to post IB SEND for control");
        return -ret;
    }

    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
    if (ret < 0) {
        error_report("rdma migration: send polling control error");
    }

    return ret;
}
1732
1733/*
1734 * Post a RECV work request in anticipation of some future receipt
1735 * of data on the control channel.
1736 */
1737static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
1738{
1739 struct ibv_recv_wr *bad_wr;
1740 struct ibv_sge sge = {
fbce8c25 1741 .addr = (uintptr_t)(rdma->wr_data[idx].control),
2da776db
MH
1742 .length = RDMA_CONTROL_MAX_BUFFER,
1743 .lkey = rdma->wr_data[idx].control_mr->lkey,
1744 };
1745
1746 struct ibv_recv_wr recv_wr = {
1747 .wr_id = RDMA_WRID_RECV_CONTROL + idx,
1748 .sg_list = &sge,
1749 .num_sge = 1,
1750 };
1751
1752
1753 if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
1754 return -1;
1755 }
1756
1757 return 0;
1758}
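
/*
 * Illustrative sketch (editor's addition, not upstream code): the control
 * protocol requires a RECV to be posted *before* the peer is told it may
 * send. A minimal round trip therefore pairs qemu_rdma_post_recv_control()
 * with a blocking wait on the matching work request ID; the helper name
 * below is hypothetical.
 */
static int example_await_control(RDMAContext *rdma, int idx)
{
    uint32_t byte_len;

    if (qemu_rdma_post_recv_control(rdma, idx) < 0) {
        return -EIO;
    }

    /* Completion of RDMA_WRID_RECV_CONTROL + idx means the data arrived. */
    return qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
                                    &byte_len);
}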
1759
1760/*
1761 * Block and wait for a RECV control channel message to arrive.
1762 */
1763static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
1764 RDMAControlHeader *head, int expecting, int idx)
1765{
1766 uint32_t byte_len;
1767 int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
1768 &byte_len);
1769
1770 if (ret < 0) {
1771 error_report("rdma migration: recv polling control error!");
1772 return ret;
1773 }
1774
1775 network_to_control((void *) rdma->wr_data[idx].control);
1776 memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
1777
1778 trace_qemu_rdma_exchange_get_response_start(control_desc(expecting));
1779
1780 if (expecting == RDMA_CONTROL_NONE) {
1781 trace_qemu_rdma_exchange_get_response_none(control_desc(head->type),
1782 head->type);
1783 } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
1784 error_report("Was expecting a %s (%d) control message"
1785 ", but got: %s (%d), length: %d",
1786 control_desc(expecting), expecting,
1787 control_desc(head->type), head->type, head->len);
1788 if (head->type == RDMA_CONTROL_ERROR) {
1789 rdma->received_error = true;
1790 }
1791 return -EIO;
1792 }
1793 if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
1794 error_report("length too long: %d", head->len);
1795 return -EINVAL;
1796 }
1797 if (sizeof(*head) + head->len != byte_len) {
1798 error_report("Malformed length: %d byte_len %d", head->len, byte_len);
1799 return -EINVAL;
1800 }
1801
1802 return 0;
1803}
1804
1805/*
1806 * When a RECV work request has completed, the work request's
1807 * buffer points at the control header.
1808 *
1809 * This advances the pointer past the header to the data portion
1810 * of the control message that was populated once the work
1811 * request finished.
1812 */
1813static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
1814 RDMAControlHeader *head)
1815{
1816 rdma->wr_data[idx].control_len = head->len;
1817 rdma->wr_data[idx].control_curr =
1818 rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
1819}
1820
1821/*
1822 * This is an 'atomic' high-level operation to deliver a single, unified
1823 * control-channel message.
1824 *
1825 * Additionally, if the user is expecting some kind of reply to this message,
1826 * they can request a 'resp' response message be filled in by posting an
1827 * additional work request on behalf of the user and waiting for an additional
1828 * completion.
1829 *
1830 * The extra (optional) response is used during registration to save us from
1831 * having to perform an *additional* exchange of messages just to provide a
1832 * response, by instead piggy-backing on the acknowledgement.
1833 */
1834static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
1835 uint8_t *data, RDMAControlHeader *resp,
1836 int *resp_idx,
1837 int (*callback)(RDMAContext *rdma))
1838{
1839 int ret = 0;
1840
1841 /*
1842 * Wait until the dest is ready before attempting to deliver the message
1843 * by waiting for a READY message.
1844 */
1845 if (rdma->control_ready_expected) {
1846 RDMAControlHeader resp;
1847 ret = qemu_rdma_exchange_get_response(rdma,
1848 &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
1849 if (ret < 0) {
1850 return ret;
1851 }
1852 }
1853
1854 /*
1855 * If the user is expecting a response, post a WR in anticipation of it.
1856 */
1857 if (resp) {
1858 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
1859 if (ret) {
1860 error_report("rdma migration: error posting"
1861 " extra control recv for anticipated result!");
1862 return ret;
1863 }
1864 }
1865
1866 /*
1867 * Post a WR to replace the one we just consumed for the READY message.
1868 */
1869 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
1870 if (ret) {
1871 error_report("rdma migration: error posting first control recv!");
1872 return ret;
1873 }
1874
1875 /*
1876 * Deliver the control message that was requested.
1877 */
1878 ret = qemu_rdma_post_send_control(rdma, data, head);
1879
1880 if (ret < 0) {
1881 error_report("Failed to send control buffer!");
1882 return ret;
1883 }
1884
1885 /*
1886 * If we're expecting a response, block and wait for it.
1887 */
1888 if (resp) {
1889 if (callback) {
1890 trace_qemu_rdma_exchange_send_issue_callback();
1891 ret = callback(rdma);
1892 if (ret < 0) {
1893 return ret;
1894 }
1895 }
1896
1897 trace_qemu_rdma_exchange_send_waiting(control_desc(resp->type));
1898 ret = qemu_rdma_exchange_get_response(rdma, resp,
1899 resp->type, RDMA_WRID_DATA);
1900
1901 if (ret < 0) {
1902 return ret;
1903 }
1904
1905 qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
1906 if (resp_idx) {
1907 *resp_idx = RDMA_WRID_DATA;
1908 }
1909 trace_qemu_rdma_exchange_send_received(control_desc(resp->type));
1910 }
1911
1912 rdma->control_ready_expected = 1;
1913
1914 return 0;
1915}
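
/*
 * Illustrative sketch (editor's addition, not upstream code): the two
 * typical shapes of a qemu_rdma_exchange_send() call. The field values
 * below are hypothetical.
 */
static int example_exchange_send_usage(RDMAContext *rdma)
{
    RDMAControlHeader head = { .len = 0,
                               .type = RDMA_CONTROL_REGISTER_FINISHED,
                               .repeat = 1 };
    RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
    RDMARegister reg = { .current_index = 0 };
    int resp_idx;
    int ret;

    /* One-way message: no response requested, so resp/resp_idx are NULL. */
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);
    if (ret < 0) {
        return ret;
    }

    /*
     * Request/response: the reply piggy-backs on the acknowledgement and
     * is available at rdma->wr_data[resp_idx].control_curr afterwards.
     */
    register_to_network(rdma, &reg);
    head.len = sizeof(reg);
    head.type = RDMA_CONTROL_REGISTER_REQUEST;
    return qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                   &resp, &resp_idx, NULL);
}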
1916
1917/*
1918 * This is an 'atomic' high-level operation to receive a single, unified
1919 * control-channel message.
1920 */
1921static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
1922 int expecting)
1923{
1924 RDMAControlHeader ready = {
1925 .len = 0,
1926 .type = RDMA_CONTROL_READY,
1927 .repeat = 1,
1928 };
1929 int ret;
1930
1931 /*
1932 * Inform the source that we're ready to receive a message.
1933 */
1934 ret = qemu_rdma_post_send_control(rdma, NULL, &ready);
1935
1936 if (ret < 0) {
1937 error_report("Failed to send control buffer!");
1938 return ret;
1939 }
1940
1941 /*
1942 * Block and wait for the message.
1943 */
1944 ret = qemu_rdma_exchange_get_response(rdma, head,
1945 expecting, RDMA_WRID_READY);
1946
1947 if (ret < 0) {
1948 return ret;
1949 }
1950
1951 qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);
1952
1953 /*
1954 * Post a new RECV work request to replace the one we just consumed.
1955 */
1956 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
1957 if (ret) {
1958 error_report("rdma migration: error posting second control recv!");
1959 return ret;
1960 }
1961
1962 return 0;
1963}
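
/*
 * Illustrative sketch (editor's addition, not upstream code): a
 * destination-side loop is just repeated qemu_rdma_exchange_recv() calls;
 * each call sends READY, blocks for one message, and re-arms the RECV
 * slot, mirroring qemu_rdma_registration_handle() later in this file.
 */
static int example_recv_until_finished(RDMAContext *rdma)
{
    RDMAControlHeader head;
    int ret;

    do {
        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
        if (ret < 0) {
            return ret;
        }
    } while (head.type != RDMA_CONTROL_REGISTER_FINISHED);

    return 0;
}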
1964
1965/*
1966 * Write an actual chunk of memory using RDMA.
1967 *
1968 * If we're using dynamic registration on the dest-side, we have to
1969 * send a registration command first.
1970 */
1971static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
1972 int current_index, uint64_t current_addr,
1973 uint64_t length)
1974{
1975 struct ibv_sge sge;
1976 struct ibv_send_wr send_wr = { 0 };
1977 struct ibv_send_wr *bad_wr;
1978 int reg_result_idx, ret, count = 0;
1979 uint64_t chunk, chunks;
1980 uint8_t *chunk_start, *chunk_end;
1981 RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
1982 RDMARegister reg;
1983 RDMARegisterResult *reg_result;
1984 RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
1985 RDMAControlHeader head = { .len = sizeof(RDMARegister),
1986 .type = RDMA_CONTROL_REGISTER_REQUEST,
1987 .repeat = 1,
1988 };
1989
1990retry:
1991 sge.addr = (uintptr_t)(block->local_host_addr +
1992 (current_addr - block->offset));
1993 sge.length = length;
1994
1995 chunk = ram_chunk_index(block->local_host_addr,
1996 (uint8_t *)(uintptr_t)sge.addr);
1997 chunk_start = ram_chunk_start(block, chunk);
1998
1999 if (block->is_ram_block) {
2000 chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
2001
2002 if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
2003 chunks--;
2004 }
2005 } else {
2006 chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
2007
2008 if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
2009 chunks--;
2010 }
2011 }
2012
2013 trace_qemu_rdma_write_one_top(chunks + 1,
2014 (chunks + 1) *
2015 (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
2016
2017 chunk_end = ram_chunk_end(block, chunk + chunks);
2018
2019 if (!rdma->pin_all) {
2020#ifdef RDMA_UNREGISTRATION_EXAMPLE
2021 qemu_rdma_unregister_waiting(rdma);
2022#endif
2023 }
2024
2025 while (test_bit(chunk, block->transit_bitmap)) {
2026 (void)count;
2027 trace_qemu_rdma_write_one_block(count++, current_index, chunk,
2028 sge.addr, length, rdma->nb_sent, block->nb_chunks);
2029
2030 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2031
2032 if (ret < 0) {
2033 error_report("Failed to wait for previous write to complete "
2034 "block %d chunk %" PRIu64
2035 " current %" PRIu64 " len %" PRIu64 " %d",
2036 current_index, chunk, sge.addr, length, rdma->nb_sent);
2037 return ret;
2038 }
2039 }
2040
2041 if (!rdma->pin_all || !block->is_ram_block) {
2042 if (!block->remote_keys[chunk]) {
2043 /*
2044 * This chunk has not yet been registered, so first check to see
2045 * if the entire chunk is zero. If so, tell the other side to
2046 * memset() + madvise() the entire chunk without RDMA.
2047 */
2048
2049 if (buffer_is_zero((void *)(uintptr_t)sge.addr, length)) {
2050 RDMACompress comp = {
2051 .offset = current_addr,
2052 .value = 0,
2053 .block_idx = current_index,
2054 .length = length,
2055 };
2056
2057 head.len = sizeof(comp);
2058 head.type = RDMA_CONTROL_COMPRESS;
2059
2060 trace_qemu_rdma_write_one_zero(chunk, sge.length,
2061 current_index, current_addr);
2062
2063 compress_to_network(rdma, &comp);
2064 ret = qemu_rdma_exchange_send(rdma, &head,
2065 (uint8_t *) &comp, NULL, NULL, NULL);
2066
2067 if (ret < 0) {
2068 return -EIO;
2069 }
2070
2071 acct_update_position(f, sge.length, true);
2072
2073 return 1;
2074 }
2075
2076 /*
2077 * Otherwise, tell other side to register.
2078 */
2079 reg.current_index = current_index;
2080 if (block->is_ram_block) {
2081 reg.key.current_addr = current_addr;
2082 } else {
2083 reg.key.chunk = chunk;
2084 }
2085 reg.chunks = chunks;
2086
2087 trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
2088 current_addr);
2089
2090 register_to_network(rdma, &reg);
2091 ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
2092 &resp, &reg_result_idx, NULL);
2093 if (ret < 0) {
2094 return ret;
2095 }
2096
2097 /* try to overlap this single registration with the one we sent. */
2098 if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
2099 &sge.lkey, NULL, chunk,
2100 chunk_start, chunk_end)) {
2101 error_report("cannot get lkey");
2102 return -EINVAL;
2103 }
2104
2105 reg_result = (RDMARegisterResult *)
2106 rdma->wr_data[reg_result_idx].control_curr;
2107
2108 network_to_result(reg_result);
2109
2110 trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
2111 reg_result->rkey, chunk);
2112
2113 block->remote_keys[chunk] = reg_result->rkey;
2114 block->remote_host_addr = reg_result->host_addr;
2115 } else {
2116 /* already registered before */
2117 if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
2118 &sge.lkey, NULL, chunk,
2119 chunk_start, chunk_end)) {
2120 error_report("cannot get lkey!");
2121 return -EINVAL;
2122 }
2123 }
2124
2125 send_wr.wr.rdma.rkey = block->remote_keys[chunk];
2126 } else {
2127 send_wr.wr.rdma.rkey = block->remote_rkey;
2128
2129 if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
2130 &sge.lkey, NULL, chunk,
2131 chunk_start, chunk_end)) {
2132 error_report("cannot get lkey!");
2133 return -EINVAL;
2134 }
2135 }
2136
2137 /*
2138 * Encode the ram block index and chunk within this wrid.
2139 * We will use this information at the time of completion
2140 * to figure out which bitmap to check against and then which
2141 * chunk in the bitmap to look for.
2142 */
2143 send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
2144 current_index, chunk);
2145
2146 send_wr.opcode = IBV_WR_RDMA_WRITE;
2147 send_wr.send_flags = IBV_SEND_SIGNALED;
2148 send_wr.sg_list = &sge;
2149 send_wr.num_sge = 1;
2150 send_wr.wr.rdma.remote_addr = block->remote_host_addr +
2151 (current_addr - block->offset);
2152
2153 trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
2154 sge.length);
2155
2156 /*
2157 * ibv_post_send() does not return negative error numbers;
2158 * per the specification, failures are reported as positive errno values.
2159 */
2160 ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
2161
2162 if (ret == ENOMEM) {
2163 trace_qemu_rdma_write_one_queue_full();
2164 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2165 if (ret < 0) {
2166 error_report("rdma migration: failed to make "
2167 "room in full send queue! %d", ret);
2168 return ret;
2169 }
2170
2171 goto retry;
2172
2173 } else if (ret > 0) {
2174 perror("rdma migration: post rdma write failed");
2175 return -ret;
2176 }
2177
2178 set_bit(chunk, block->transit_bitmap);
2179 acct_update_position(f, sge.length, false);
2180 rdma->total_writes++;
2181
2182 return 0;
2183}
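
/*
 * Illustrative sketch (editor's addition, not upstream code): how a
 * (block, guest address) pair maps onto the 1MB registration chunks used
 * by the write path above.
 */
static void example_chunk_math(RDMALocalBlock *block, uint64_t current_addr)
{
    uint8_t *host_addr = block->local_host_addr +
                         (current_addr - block->offset);
    uint64_t chunk = ram_chunk_index(block->local_host_addr, host_addr);
    uint8_t *chunk_start = ram_chunk_start(block, chunk);
    uint8_t *chunk_end = ram_chunk_end(block, chunk);

    /* A chunk never spans more than (1UL << RDMA_REG_CHUNK_SHIFT) bytes. */
    assert((uint64_t)(chunk_end - chunk_start) <=
           (1UL << RDMA_REG_CHUNK_SHIFT));
}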
2184
2185/*
2186 * Push out any unwritten RDMA operations.
2187 *
2188 * We support sending out multiple chunks at the same time.
2189 * Not all of them need to get signaled in the completion queue.
2190 */
2191static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
2192{
2193 int ret;
2194
2195 if (!rdma->current_length) {
2196 return 0;
2197 }
2198
2199 ret = qemu_rdma_write_one(f, rdma,
2200 rdma->current_index, rdma->current_addr, rdma->current_length);
2201
2202 if (ret < 0) {
2203 return ret;
2204 }
2205
2206 if (ret == 0) {
2207 rdma->nb_sent++;
2208 trace_qemu_rdma_write_flush(rdma->nb_sent);
2209 }
2210
2211 rdma->current_length = 0;
2212 rdma->current_addr = 0;
2213
2214 return 0;
2215}
2216
2217static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
2218 uint64_t offset, uint64_t len)
2219{
2220 RDMALocalBlock *block;
2221 uint8_t *host_addr;
2222 uint8_t *chunk_end;
2223
2224 if (rdma->current_index < 0) {
2225 return 0;
2226 }
2227
2228 if (rdma->current_chunk < 0) {
2229 return 0;
2230 }
2231
2232 block = &(rdma->local_ram_blocks.block[rdma->current_index]);
2233 host_addr = block->local_host_addr + (offset - block->offset);
2234 chunk_end = ram_chunk_end(block, rdma->current_chunk);
2235
2236 if (rdma->current_length == 0) {
2237 return 0;
2238 }
2239
2240 /*
2241 * Only merge into chunk sequentially.
2242 */
2243 if (offset != (rdma->current_addr + rdma->current_length)) {
2244 return 0;
2245 }
2246
2247 if (offset < block->offset) {
2248 return 0;
2249 }
2250
2251 if ((offset + len) > (block->offset + block->length)) {
2252 return 0;
2253 }
2254
2255 if ((host_addr + len) > chunk_end) {
2256 return 0;
2257 }
2258
2259 return 1;
2260}
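
/*
 * Illustrative sketch (editor's addition, not upstream code): the merge
 * rule above only coalesces a write that starts exactly where the pending
 * buffer ends and that stays inside the current block and chunk. The
 * helper name is hypothetical.
 */
static int example_is_sequential(RDMAContext *rdma,
                                 uint64_t offset, uint64_t len)
{
    /* Any gap, overlap, or chunk crossing forces a flush in the caller. */
    return offset == rdma->current_addr + rdma->current_length &&
           qemu_rdma_buffer_mergable(rdma, offset, len);
}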
2261
2262/*
2263 * We're not actually writing here, but doing three things:
2264 *
2265 * 1. Identify the chunk the buffer belongs to.
2266 * 2. If the chunk is full or the buffer doesn't belong to the current
2267 * chunk, then start a new chunk and flush() the old chunk.
2268 * 3. To keep the hardware busy, we also group chunks into batches
2269 * and only require that a batch gets acknowledged in the completion
2270 * queue instead of each individual chunk.
2271 */
2272static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
2273 uint64_t block_offset, uint64_t offset,
2274 uint64_t len)
2275{
2276 uint64_t current_addr = block_offset + offset;
2277 uint64_t index = rdma->current_index;
2278 uint64_t chunk = rdma->current_chunk;
2279 int ret;
2280
2281 /* If we cannot merge it, we flush the current buffer first. */
2282 if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
2283 ret = qemu_rdma_write_flush(f, rdma);
2284 if (ret) {
2285 return ret;
2286 }
2287 rdma->current_length = 0;
2288 rdma->current_addr = current_addr;
2289
2290 ret = qemu_rdma_search_ram_block(rdma, block_offset,
2291 offset, len, &index, &chunk);
2292 if (ret) {
2293 error_report("ram block search failed");
2294 return ret;
2295 }
2296 rdma->current_index = index;
2297 rdma->current_chunk = chunk;
2298 }
2299
2300 /* merge it */
2301 rdma->current_length += len;
2302
2303 /* flush it if buffer is too large */
2304 if (rdma->current_length >= RDMA_MERGE_MAX) {
2305 return qemu_rdma_write_flush(f, rdma);
2306 }
2307
2308 return 0;
2309}
2310
2311static void qemu_rdma_cleanup(RDMAContext *rdma)
2312{
2313 int idx;
2314
2315 if (rdma->cm_id && rdma->connected) {
2316 if ((rdma->error_state ||
2317 migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) &&
2318 !rdma->received_error) {
2319 RDMAControlHeader head = { .len = 0,
2320 .type = RDMA_CONTROL_ERROR,
2321 .repeat = 1,
2322 };
2323 error_report("Early error. Sending error.");
2324 qemu_rdma_post_send_control(rdma, NULL, &head);
2325 }
2326
2327 rdma_disconnect(rdma->cm_id);
2328 trace_qemu_rdma_cleanup_disconnect();
2329 rdma->connected = false;
2330 }
2331
2332 if (rdma->channel) {
2333 qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
2334 }
2335 g_free(rdma->dest_blocks);
2336 rdma->dest_blocks = NULL;
2337
2338 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2339 if (rdma->wr_data[idx].control_mr) {
2340 rdma->total_registrations--;
2341 ibv_dereg_mr(rdma->wr_data[idx].control_mr);
2342 }
2343 rdma->wr_data[idx].control_mr = NULL;
2344 }
2345
2346 if (rdma->local_ram_blocks.block) {
2347 while (rdma->local_ram_blocks.nb_blocks) {
2348 rdma_delete_block(rdma, &rdma->local_ram_blocks.block[0]);
2349 }
2350 }
2351
2352 if (rdma->qp) {
2353 rdma_destroy_qp(rdma->cm_id);
2354 rdma->qp = NULL;
2355 }
2356 if (rdma->cq) {
2357 ibv_destroy_cq(rdma->cq);
2358 rdma->cq = NULL;
2359 }
2360 if (rdma->comp_channel) {
2361 ibv_destroy_comp_channel(rdma->comp_channel);
2362 rdma->comp_channel = NULL;
2363 }
2364 if (rdma->pd) {
2365 ibv_dealloc_pd(rdma->pd);
2366 rdma->pd = NULL;
2367 }
2368 if (rdma->cm_id) {
2369 rdma_destroy_id(rdma->cm_id);
2370 rdma->cm_id = NULL;
2371 }
2372
2373 /* on the destination side, listen_id and channel are shared */
2374 if (rdma->listen_id) {
2375 if (!rdma->is_return_path) {
2376 rdma_destroy_id(rdma->listen_id);
2377 }
2378 rdma->listen_id = NULL;
2379
2380 if (rdma->channel) {
2381 if (!rdma->is_return_path) {
2382 rdma_destroy_event_channel(rdma->channel);
2383 }
2384 rdma->channel = NULL;
2385 }
2386 }
2387
2388 if (rdma->channel) {
2389 rdma_destroy_event_channel(rdma->channel);
2390 rdma->channel = NULL;
2391 }
2392 g_free(rdma->host);
2393 rdma->host = NULL;
2394}
2395
2396
2397static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
2398{
2399 int ret, idx;
2400 Error *local_err = NULL, **temp = &local_err;
2401
2402 /*
2403 * Will be validated against destination's actual capabilities
2404 * after the connect() completes.
2405 */
2406 rdma->pin_all = pin_all;
2407
2408 ret = qemu_rdma_resolve_host(rdma, temp);
2409 if (ret) {
2410 goto err_rdma_source_init;
2411 }
2412
2413 ret = qemu_rdma_alloc_pd_cq(rdma);
2414 if (ret) {
2415 ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
2416 " limits may be too low. Please check $ ulimit -a # and "
2417 "search for 'ulimit -l' in the output");
2418 goto err_rdma_source_init;
2419 }
2420
2421 ret = qemu_rdma_alloc_qp(rdma);
2422 if (ret) {
2423 ERROR(temp, "rdma migration: error allocating qp!");
2424 goto err_rdma_source_init;
2425 }
2426
2427 ret = qemu_rdma_init_ram_blocks(rdma);
2428 if (ret) {
2429 ERROR(temp, "rdma migration: error initializing ram blocks!");
2430 goto err_rdma_source_init;
2431 }
2432
2433 /* Build the hash that maps from offset to RAMBlock */
2434 rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
2435 for (idx = 0; idx < rdma->local_ram_blocks.nb_blocks; idx++) {
2436 g_hash_table_insert(rdma->blockmap,
2437 (void *)(uintptr_t)rdma->local_ram_blocks.block[idx].offset,
2438 &rdma->local_ram_blocks.block[idx]);
2439 }
2440
2441 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2442 ret = qemu_rdma_reg_control(rdma, idx);
2443 if (ret) {
2444 ERROR(temp, "rdma migration: error registering %d control!",
2445 idx);
2446 goto err_rdma_source_init;
2447 }
2448 }
2449
2450 return 0;
2451
2452err_rdma_source_init:
2453 error_propagate(errp, local_err);
2454 qemu_rdma_cleanup(rdma);
2455 return -1;
2456}
2457
2458static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
2459{
2460 RDMACapabilities cap = {
2461 .version = RDMA_CONTROL_VERSION_CURRENT,
2462 .flags = 0,
2463 };
2464 struct rdma_conn_param conn_param = { .initiator_depth = 2,
2465 .retry_count = 5,
2466 .private_data = &cap,
2467 .private_data_len = sizeof(cap),
2468 };
2469 struct rdma_cm_event *cm_event;
2470 int ret;
2471
2472 /*
2473 * Only negotiate the capability with destination if the user
2474 * on the source first requested the capability.
2475 */
2476 if (rdma->pin_all) {
2477 trace_qemu_rdma_connect_pin_all_requested();
2478 cap.flags |= RDMA_CAPABILITY_PIN_ALL;
2479 }
2480
2481 caps_to_network(&cap);
2482
2483 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
2484 if (ret) {
2485 ERROR(errp, "posting second control recv");
2486 goto err_rdma_source_connect;
2487 }
2488
2489 ret = rdma_connect(rdma->cm_id, &conn_param);
2490 if (ret) {
2491 perror("rdma_connect");
2492 ERROR(errp, "connecting to destination!");
2493 goto err_rdma_source_connect;
2494 }
2495
2496 ret = rdma_get_cm_event(rdma->channel, &cm_event);
2497 if (ret) {
2498 perror("rdma_get_cm_event after rdma_connect");
2499 ERROR(errp, "connecting to destination!");
2500 rdma_ack_cm_event(cm_event);
2501 goto err_rdma_source_connect;
2502 }
2503
2504 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
2505 perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
2506 ERROR(errp, "connecting to destination!");
2507 rdma_ack_cm_event(cm_event);
2508 goto err_rdma_source_connect;
2509 }
2510 rdma->connected = true;
2511
2512 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
2513 network_to_caps(&cap);
2514
2515 /*
2516 * Verify that the *requested* capabilities are supported by the destination
2517 * and disable them otherwise.
2518 */
2519 if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
2520 ERROR(errp, "Server cannot support pinning all memory. "
2521 "Will register memory dynamically.");
2522 rdma->pin_all = false;
2523 }
2524
2525 trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);
2526
2527 rdma_ack_cm_event(cm_event);
2528
2529 rdma->control_ready_expected = 1;
2530 rdma->nb_sent = 0;
2531 return 0;
2532
2533err_rdma_source_connect:
2534 qemu_rdma_cleanup(rdma);
2535 return -1;
2536}
2537
2538static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
2539{
2540 int ret, idx;
2541 struct rdma_cm_id *listen_id;
2542 char ip[40] = "unknown";
2543 struct rdma_addrinfo *res, *e;
2544 char port_str[16];
2545
2546 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2547 rdma->wr_data[idx].control_len = 0;
2548 rdma->wr_data[idx].control_curr = NULL;
2549 }
2550
2551 if (!rdma->host || !rdma->host[0]) {
2552 ERROR(errp, "RDMA host is not set!");
2553 rdma->error_state = -EINVAL;
2554 return -1;
2555 }
2556 /* create CM channel */
2557 rdma->channel = rdma_create_event_channel();
2558 if (!rdma->channel) {
2559 ERROR(errp, "could not create rdma event channel");
2560 rdma->error_state = -EINVAL;
2561 return -1;
2562 }
2563
2564 /* create CM id */
2565 ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
2566 if (ret) {
2567 ERROR(errp, "could not create cm_id!");
2568 goto err_dest_init_create_listen_id;
2569 }
2570
2571 snprintf(port_str, 16, "%d", rdma->port);
2572 port_str[15] = '\0';
2573
2574 ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
2575 if (ret < 0) {
2576 ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
2577 goto err_dest_init_bind_addr;
2578 }
2579
2580 for (e = res; e != NULL; e = e->ai_next) {
2581 inet_ntop(e->ai_family,
2582 &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
2583 trace_qemu_rdma_dest_init_trying(rdma->host, ip);
2584 ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
2585 if (ret) {
2586 continue;
2587 }
2588 if (e->ai_family == AF_INET6) {
2589 ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, errp);
2590 if (ret) {
2591 continue;
2592 }
2593 }
2594 break;
2595 }
2596
2597 if (!e) {
2598 ERROR(errp, "Error: could not rdma_bind_addr!");
2599 goto err_dest_init_bind_addr;
2600 }
2601
2602 rdma->listen_id = listen_id;
2603 qemu_rdma_dump_gid("dest_init", listen_id);
2604 return 0;
2605
2606err_dest_init_bind_addr:
2607 rdma_destroy_id(listen_id);
2608err_dest_init_create_listen_id:
2609 rdma_destroy_event_channel(rdma->channel);
2610 rdma->channel = NULL;
2611 rdma->error_state = ret;
2612 return ret;
2613
2614}
2615
2616static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
2617 RDMAContext *rdma)
2618{
2619 int idx;
2620
2621 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
2622 rdma_return_path->wr_data[idx].control_len = 0;
2623 rdma_return_path->wr_data[idx].control_curr = NULL;
2624 }
2625
2626 /* the CM channel and CM id are shared */
2627 rdma_return_path->channel = rdma->channel;
2628 rdma_return_path->listen_id = rdma->listen_id;
2629
2630 rdma->return_path = rdma_return_path;
2631 rdma_return_path->return_path = rdma;
2632 rdma_return_path->is_return_path = true;
2633}
2634
2635static void *qemu_rdma_data_init(const char *host_port, Error **errp)
2636{
2637 RDMAContext *rdma = NULL;
2638 InetSocketAddress *addr;
2639
2640 if (host_port) {
2641 rdma = g_new0(RDMAContext, 1);
2642 rdma->current_index = -1;
2643 rdma->current_chunk = -1;
2644
2645 addr = g_new(InetSocketAddress, 1);
2646 if (!inet_parse(addr, host_port, NULL)) {
2647 rdma->port = atoi(addr->port);
2648 rdma->host = g_strdup(addr->host);
2649 } else {
2650 ERROR(errp, "bad RDMA migration address '%s'", host_port);
2651 g_free(rdma);
2652 rdma = NULL;
2653 }
2654
2655 qapi_free_InetSocketAddress(addr);
2656 }
2657
2658 return rdma;
2659}
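
/*
 * Illustrative sketch (editor's addition, not upstream code): typical use
 * of qemu_rdma_data_init() with a "host:port" string; the address below
 * is hypothetical.
 */
static RDMAContext *example_data_init(Error **errp)
{
    RDMAContext *rdma = qemu_rdma_data_init("192.168.1.1:4444", errp);

    if (rdma) {
        /* rdma->host is now "192.168.1.1" and rdma->port is 4444. */
    }
    return rdma;
}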
2660
2661/*
2662 * QEMUFile interface to the control channel.
2663 * SEND messages for control only.
2664 * VM's ram is handled with regular RDMA messages.
2665 */
2666static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
2667 const struct iovec *iov,
2668 size_t niov,
2669 int *fds,
2670 size_t nfds,
2671 Error **errp)
2672{
2673 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
2674 QEMUFile *f = rioc->file;
2675 RDMAContext *rdma;
2676 int ret;
2677 ssize_t done = 0;
2678 size_t i;
2679 size_t len = 0;
2680
2681 rcu_read_lock();
2682 rdma = atomic_rcu_read(&rioc->rdmaout);
2683
2684 if (!rdma) {
2685 rcu_read_unlock();
2686 return -EIO;
2687 }
2688
2689 CHECK_ERROR_STATE();
2690
2691 /*
2692 * Push out any writes that we have
2693 * queued up for the VM's ram.
2694 */
2695 ret = qemu_rdma_write_flush(f, rdma);
2696 if (ret < 0) {
2697 rdma->error_state = ret;
2698 rcu_read_unlock();
2699 return ret;
2700 }
2701
2702 for (i = 0; i < niov; i++) {
2703 size_t remaining = iov[i].iov_len;
2704 uint8_t * data = (void *)iov[i].iov_base;
2705 while (remaining) {
2706 RDMAControlHeader head;
2707
2708 len = MIN(remaining, RDMA_SEND_INCREMENT);
2709 remaining -= len;
2710
2711 head.len = len;
2712 head.type = RDMA_CONTROL_QEMU_FILE;
2713
2714 ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);
2715
2716 if (ret < 0) {
2717 rdma->error_state = ret;
2718 rcu_read_unlock();
2719 return ret;
2720 }
2721
2722 data += len;
2723 done += len;
2724 }
2725 }
2726
2727 rcu_read_unlock();
2728 return done;
2729}
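
/*
 * Illustrative sketch (editor's addition, not upstream code): the loop
 * above slices each iovec into RDMA_SEND_INCREMENT-sized control
 * messages, so writing nbytes costs this many SEND exchanges.
 */
static size_t example_send_count(size_t nbytes)
{
    return (nbytes + RDMA_SEND_INCREMENT - 1) / RDMA_SEND_INCREMENT;
}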
2730
2731static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
2732 size_t size, int idx)
2733{
2734 size_t len = 0;
2735
2736 if (rdma->wr_data[idx].control_len) {
2737 trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);
2738
2739 len = MIN(size, rdma->wr_data[idx].control_len);
2740 memcpy(buf, rdma->wr_data[idx].control_curr, len);
2741 rdma->wr_data[idx].control_curr += len;
2742 rdma->wr_data[idx].control_len -= len;
2743 }
2744
2745 return len;
2746}
2747
2748/*
2749 * QEMUFile interface to the control channel.
2750 * RDMA links don't use bytestreams, so we have to
2751 * return bytes to QEMUFile opportunistically.
2752 */
2753static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
2754 const struct iovec *iov,
2755 size_t niov,
2756 int **fds,
2757 size_t *nfds,
2758 Error **errp)
2759{
2760 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
2761 RDMAContext *rdma;
2762 RDMAControlHeader head;
2763 int ret = 0;
2764 ssize_t i;
2765 size_t done = 0;
2766
2767 rcu_read_lock();
2768 rdma = atomic_rcu_read(&rioc->rdmain);
2769
2770 if (!rdma) {
2771 rcu_read_unlock();
2772 return -EIO;
2773 }
2774
2775 CHECK_ERROR_STATE();
2776
2777 for (i = 0; i < niov; i++) {
2778 size_t want = iov[i].iov_len;
2779 uint8_t *data = (void *)iov[i].iov_base;
2780
2781 /*
2782 * First, we hold on to the last SEND message we
2783 * were given and dish out the bytes until we run
2784 * out of bytes.
2785 */
2786 ret = qemu_rdma_fill(rdma, data, want, 0);
2787 done += ret;
2788 want -= ret;
2789 /* Got what we needed, so go to next iovec */
2790 if (want == 0) {
2791 continue;
2792 }
2793
2794 /* If we got any data so far, then don't wait
2795 * for more, just return what we have */
2796 if (done > 0) {
2797 break;
2798 }
2799
2800
2801 /* We've got nothing at all, so let's wait for
2802 * more to arrive
2803 */
2804 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);
2805
2806 if (ret < 0) {
2807 rdma->error_state = ret;
74637e6f 2808 rcu_read_unlock();
6ddd2d76
DB
2809 return ret;
2810 }
2811
2812 /*
2813 * SEND was received with new bytes, now try again.
2814 */
2815 ret = qemu_rdma_fill(rdma, data, want, 0);
2816 done += ret;
2817 want -= ret;
2818
2819 /* Still didn't get enough, so let's just return */
2820 if (want) {
2821 if (done == 0) {
2822 rcu_read_unlock();
2823 return QIO_CHANNEL_ERR_BLOCK;
2824 } else {
2825 break;
2826 }
2827 }
2828 }
2829 rcu_read_unlock();
2830 return done;
2831}
2832
2833/*
2834 * Block until all the outstanding chunks have been delivered by the hardware.
2835 */
2836static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
2837{
2838 int ret;
2839
2840 if (qemu_rdma_write_flush(f, rdma) < 0) {
2841 return -EIO;
2842 }
2843
2844 while (rdma->nb_sent) {
2845 ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
2846 if (ret < 0) {
2847 error_report("rdma migration: complete polling error!");
2848 return -EIO;
2849 }
2850 }
2851
2852 qemu_rdma_unregister_waiting(rdma);
2853
2854 return 0;
2855}
2856
2857
2858static int qio_channel_rdma_set_blocking(QIOChannel *ioc,
2859 bool blocking,
2860 Error **errp)
2861{
2862 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
2863 /* XXX we should make readv/writev actually honour this :-) */
2864 rioc->blocking = blocking;
2865 return 0;
2866}
2867
2868
2869typedef struct QIOChannelRDMASource QIOChannelRDMASource;
2870struct QIOChannelRDMASource {
2871 GSource parent;
2872 QIOChannelRDMA *rioc;
2873 GIOCondition condition;
2874};
2875
2876static gboolean
2877qio_channel_rdma_source_prepare(GSource *source,
2878 gint *timeout)
2879{
2880 QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
2881 RDMAContext *rdma;
2882 GIOCondition cond = 0;
2883 *timeout = -1;
2884
2885 rcu_read_lock();
2886 if (rsource->condition == G_IO_IN) {
2887 rdma = atomic_rcu_read(&rsource->rioc->rdmain);
2888 } else {
2889 rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
2890 }
2891
2892 if (!rdma) {
2893 error_report("RDMAContext is NULL when preparing GSource");
2894 rcu_read_unlock();
2895 return FALSE;
2896 }
2897
2898 if (rdma->wr_data[0].control_len) {
2899 cond |= G_IO_IN;
2900 }
2901 cond |= G_IO_OUT;
2902
2903 rcu_read_unlock();
2904 return cond & rsource->condition;
2905}
2906
2907static gboolean
2908qio_channel_rdma_source_check(GSource *source)
2909{
2910 QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
2911 RDMAContext *rdma;
2912 GIOCondition cond = 0;
2913
2914 rcu_read_lock();
2915 if (rsource->condition == G_IO_IN) {
2916 rdma = atomic_rcu_read(&rsource->rioc->rdmain);
2917 } else {
2918 rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
2919 }
2920
2921 if (!rdma) {
2922 error_report("RDMAContext is NULL when checking GSource");
2923 rcu_read_unlock();
2924 return FALSE;
2925 }
2926
2927 if (rdma->wr_data[0].control_len) {
2928 cond |= G_IO_IN;
2929 }
2930 cond |= G_IO_OUT;
2931
2932 rcu_read_unlock();
2933 return cond & rsource->condition;
2934}
2935
2936static gboolean
2937qio_channel_rdma_source_dispatch(GSource *source,
2938 GSourceFunc callback,
2939 gpointer user_data)
2940{
2941 QIOChannelFunc func = (QIOChannelFunc)callback;
2942 QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
2943 RDMAContext *rdma;
2944 GIOCondition cond = 0;
2945
2946 rcu_read_lock();
2947 if (rsource->condition == G_IO_IN) {
2948 rdma = atomic_rcu_read(&rsource->rioc->rdmain);
2949 } else {
2950 rdma = atomic_rcu_read(&rsource->rioc->rdmaout);
2951 }
2952
2953 if (!rdma) {
2954 error_report("RDMAContext is NULL when dispatching GSource");
2955 rcu_read_unlock();
2956 return FALSE;
2957 }
2958
2959 if (rdma->wr_data[0].control_len) {
2960 cond |= G_IO_IN;
2961 }
2962 cond |= G_IO_OUT;
2963
2964 rcu_read_unlock();
2965 return (*func)(QIO_CHANNEL(rsource->rioc),
2966 (cond & rsource->condition),
2967 user_data);
2968}
2969
2970static void
2971qio_channel_rdma_source_finalize(GSource *source)
2972{
2973 QIOChannelRDMASource *ssource = (QIOChannelRDMASource *)source;
2974
2975 object_unref(OBJECT(ssource->rioc));
2976}
2977
2978GSourceFuncs qio_channel_rdma_source_funcs = {
2979 qio_channel_rdma_source_prepare,
2980 qio_channel_rdma_source_check,
2981 qio_channel_rdma_source_dispatch,
2982 qio_channel_rdma_source_finalize
2983};
2984
2985static GSource *qio_channel_rdma_create_watch(QIOChannel *ioc,
2986 GIOCondition condition)
2da776db 2987{
2988 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
2989 QIOChannelRDMASource *ssource;
2990 GSource *source;
2991
2992 source = g_source_new(&qio_channel_rdma_source_funcs,
2993 sizeof(QIOChannelRDMASource));
2994 ssource = (QIOChannelRDMASource *)source;
2995
2996 ssource->rioc = rioc;
2997 object_ref(OBJECT(rioc));
2998
2999 ssource->condition = condition;
3000
3001 return source;
3002}
3003
3004static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
3005 AioContext *ctx,
3006 IOHandler *io_read,
3007 IOHandler *io_write,
3008 void *opaque)
3009{
3010 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
3011 if (io_read) {
3012 aio_set_fd_handler(ctx, rioc->rdmain->comp_channel->fd,
3013 false, io_read, io_write, NULL, opaque);
3014 } else {
3015 aio_set_fd_handler(ctx, rioc->rdmaout->comp_channel->fd,
3016 false, io_read, io_write, NULL, opaque);
3017 }
3018}
3019
3020struct rdma_close_rcu {
3021 struct rcu_head rcu;
3022 RDMAContext *rdmain;
3023 RDMAContext *rdmaout;
3024};
3025
3026/* callback from qio_channel_rdma_close via call_rcu */
3027static void qio_channel_rdma_close_rcu(struct rdma_close_rcu *rcu)
3028{
3029 if (rcu->rdmain) {
3030 qemu_rdma_cleanup(rcu->rdmain);
3031 }
3032
3033 if (rcu->rdmaout) {
3034 qemu_rdma_cleanup(rcu->rdmaout);
3035 }
3036
3037 g_free(rcu->rdmain);
3038 g_free(rcu->rdmaout);
3039 g_free(rcu);
3040}
3041
3042static int qio_channel_rdma_close(QIOChannel *ioc,
3043 Error **errp)
3044{
3045 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
3046 RDMAContext *rdmain, *rdmaout;
3047 struct rdma_close_rcu *rcu = g_new(struct rdma_close_rcu, 1);
3048
3049 trace_qemu_rdma_close();
3050
3051 rdmain = rioc->rdmain;
3052 if (rdmain) {
3053 atomic_rcu_set(&rioc->rdmain, NULL);
3054 }
3055
3056 rdmaout = rioc->rdmaout;
3057 if (rdmaout) {
3058 atomic_rcu_set(&rioc->rdmaout, NULL);
3059 }
3060
3061 rcu->rdmain = rdmain;
3062 rcu->rdmaout = rdmaout;
3063 call_rcu(rcu, qio_channel_rdma_close_rcu, rcu);
3064
3065 return 0;
3066}
3067
3068static int
3069qio_channel_rdma_shutdown(QIOChannel *ioc,
3070 QIOChannelShutdown how,
3071 Error **errp)
3072{
3073 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
3074 RDMAContext *rdmain, *rdmaout;
3075
3076 rcu_read_lock();
3077
3078 rdmain = atomic_rcu_read(&rioc->rdmain);
3079 rdmaout = atomic_rcu_read(&rioc->rdmaout);
3080
3081 switch (how) {
3082 case QIO_CHANNEL_SHUTDOWN_READ:
3083 if (rdmain) {
3084 rdmain->error_state = -1;
3085 }
3086 break;
3087 case QIO_CHANNEL_SHUTDOWN_WRITE:
3088 if (rdmaout) {
3089 rdmaout->error_state = -1;
3090 }
3091 break;
3092 case QIO_CHANNEL_SHUTDOWN_BOTH:
3093 default:
3094 if (rdmain) {
3095 rdmain->error_state = -1;
3096 }
3097 if (rdmaout) {
3098 rdmaout->error_state = -1;
3099 }
3100 break;
3101 }
3102
3103 rcu_read_unlock();
3104 return 0;
3105}
3106
3107/*
3108 * Parameters:
3109 * @offset == 0 :
3110 * This means that 'block_offset' is a full virtual address that does not
3111 * belong to a RAMBlock of the virtual machine and instead
3112 * represents a private malloc'd memory area that the caller wishes to
3113 * transfer.
3114 *
3115 * @offset != 0 :
3116 * Offset is an offset to be added to block_offset and used
3117 * to also lookup the corresponding RAMBlock.
3118 *
3119 * @size > 0 :
3120 * Initiate a transfer of this size.
3121 *
3122 * @size == 0 :
3123 * A 'hint' or 'advice' that means that we wish to speculatively
3124 * and asynchronously unregister this memory. In this case, there is no
3125 * guarantee that the unregister will actually happen, for example,
3126 * if the memory is being actively transmitted. Additionally, the memory
3127 * may be re-registered at any future time if a write within the same
3128 * chunk was requested again, even if you attempted to unregister it
3129 * here.
3130 *
3131 * @size < 0 : TODO, not yet supported
3132 * Unregister the memory NOW. This means that the caller does not
3133 * expect there to be any future RDMA transfers and we just want to clean
3134 * things up. This is used in case the upper layer owns the memory and
3135 * cannot wait for qemu_fclose() to occur.
3136 *
3137 * @bytes_sent : User-specified pointer to indicate how many bytes were
3138 * sent. Usually, this will not be more than a few bytes of
3139 * the protocol because most transfers are sent asynchronously.
3140 */
3141static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
3142 ram_addr_t block_offset, ram_addr_t offset,
3143 size_t size, uint64_t *bytes_sent)
3144{
3145 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
3146 RDMAContext *rdma;
3147 int ret;
3148
3149 rcu_read_lock();
3150 rdma = atomic_rcu_read(&rioc->rdmaout);
3151
3152 if (!rdma) {
3153 rcu_read_unlock();
3154 return -EIO;
3155 }
3156
3157 CHECK_ERROR_STATE();
3158
3159 if (migration_in_postcopy()) {
3160 rcu_read_unlock();
3161 return RAM_SAVE_CONTROL_NOT_SUPP;
3162 }
3163
2da776db
MH
3164 qemu_fflush(f);
3165
3166 if (size > 0) {
3167 /*
3168 * Add this page to the current 'chunk'. If the chunk
3169 * is full, or the page doesn't belong to the current chunk,
3170 * an actual RDMA write will occur and a new chunk will be formed.
3171 */
3172 ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
3173 if (ret < 0) {
3174 error_report("rdma migration: write error! %d", ret);
3175 goto err;
3176 }
3177
3178 /*
3179 * We always return 1 byte because the RDMA
3180 * protocol is completely asynchronous. We do not yet know
3181 * whether an identified chunk is zero or not because we're
3182 * waiting for other pages to potentially be merged with
3183 * the current chunk. So, we have to call qemu_update_position()
3184 * later on when the actual write occurs.
3185 */
3186 if (bytes_sent) {
3187 *bytes_sent = 1;
3188 }
3189 } else {
3190 uint64_t index, chunk;
3191
3192 /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
3193 if (size < 0) {
3194 ret = qemu_rdma_drain_cq(f, rdma);
3195 if (ret < 0) {
3196 fprintf(stderr, "rdma: failed to synchronously drain"
3197 " completion queue before unregistration.\n");
3198 goto err;
3199 }
3200 }
3201 */
3202
3203 ret = qemu_rdma_search_ram_block(rdma, block_offset,
3204 offset, size, &index, &chunk);
3205
3206 if (ret) {
3207 error_report("ram block search failed");
3208 goto err;
3209 }
3210
3211 qemu_rdma_signal_unregister(rdma, index, chunk, 0);
3212
3213 /*
3214 * TODO: Synchronous, guaranteed unregistration (should not occur during
3215 * fast-path). Otherwise, unregisters will process on the next call to
3216 * qemu_rdma_drain_cq()
3217 if (size < 0) {
3218 qemu_rdma_unregister_waiting(rdma);
3219 }
3220 */
3221 }
3222
3223 /*
3224 * Drain the Completion Queue if possible, but do not block,
3225 * just poll.
3226 *
3227 * If nothing to poll, the end of the iteration will do this
3228 * again to make sure we don't overflow the request queue.
3229 */
3230 while (1) {
3231 uint64_t wr_id, wr_id_in;
3232 int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
3233 if (ret < 0) {
3234 error_report("rdma migration: polling error! %d", ret);
3235 goto err;
3236 }
3237
3238 wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
3239
3240 if (wr_id == RDMA_WRID_NONE) {
3241 break;
3242 }
3243 }
3244
3245 rcu_read_unlock();
3246 return RAM_SAVE_CONTROL_DELAYED;
3247err:
3248 rdma->error_state = ret;
3249 rcu_read_unlock();
3250 return ret;
3251}
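
/*
 * Illustrative sketch (editor's addition, not upstream code): unpacking
 * the 64-bit work request ID built by qemu_rdma_make_wrid() back into its
 * three fields, using the shift/mask constants defined at the top of
 * this file.
 */
static void example_unpack_wrid(uint64_t wr_id)
{
    uint64_t type = (wr_id & RDMA_WRID_TYPE_MASK) >> RDMA_WRID_TYPE_SHIFT;
    uint64_t index = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
    uint64_t chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;

    /* For a completed RDMA write, type == RDMA_WRID_RDMA_WRITE. */
    (void)type; (void)index; (void)chunk;
}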
3252
3253static void rdma_accept_incoming_migration(void *opaque);
3254
3255static void rdma_cm_poll_handler(void *opaque)
3256{
3257 RDMAContext *rdma = opaque;
3258 int ret;
3259 struct rdma_cm_event *cm_event;
3260 MigrationIncomingState *mis = migration_incoming_get_current();
3261
3262 ret = rdma_get_cm_event(rdma->channel, &cm_event);
3263 if (ret) {
3264 error_report("get_cm_event failed %d", errno);
3265 return;
3266 }
3267 rdma_ack_cm_event(cm_event);
3268
3269 if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
3270 cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
3271 if (!rdma->error_state &&
3272 migration_incoming_get_current()->state !=
3273 MIGRATION_STATUS_COMPLETED) {
3274 error_report("receive cm event, cm event is %d", cm_event->event);
3275 rdma->error_state = -EPIPE;
3276 if (rdma->return_path) {
3277 rdma->return_path->error_state = -EPIPE;
3278 }
3279 }
3280
3281 if (mis->migration_incoming_co) {
3282 qemu_coroutine_enter(mis->migration_incoming_co);
3283 }
3284 return;
3285 }
3286}
3287
3288static int qemu_rdma_accept(RDMAContext *rdma)
3289{
3290 RDMACapabilities cap;
3291 struct rdma_conn_param conn_param = {
3292 .responder_resources = 2,
3293 .private_data = &cap,
3294 .private_data_len = sizeof(cap),
3295 };
3296 struct rdma_cm_event *cm_event;
3297 struct ibv_context *verbs;
3298 int ret = -EINVAL;
3299 int idx;
3300
3301 ret = rdma_get_cm_event(rdma->channel, &cm_event);
3302 if (ret) {
3303 goto err_rdma_dest_wait;
3304 }
3305
3306 if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
3307 rdma_ack_cm_event(cm_event);
3308 goto err_rdma_dest_wait;
3309 }
3310
3311 memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
3312
3313 network_to_caps(&cap);
3314
3315 if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
3316 error_report("Unknown source RDMA version: %d, bailing...",
3317 cap.version);
3318 rdma_ack_cm_event(cm_event);
3319 goto err_rdma_dest_wait;
3320 }
3321
3322 /*
3323 * Respond with only the capabilities this version of QEMU knows about.
3324 */
3325 cap.flags &= known_capabilities;
3326
3327 /*
3328 * Enable the ones that we do know about.
3329 * Add other checks here as new ones are introduced.
3330 */
3331 if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
3332 rdma->pin_all = true;
3333 }
3334
3335 rdma->cm_id = cm_event->id;
3336 verbs = cm_event->id->verbs;
3337
3338 rdma_ack_cm_event(cm_event);
3339
3340 trace_qemu_rdma_accept_pin_state(rdma->pin_all);
3341
3342 caps_to_network(&cap);
3343
3344 trace_qemu_rdma_accept_pin_verbsc(verbs);
3345
3346 if (!rdma->verbs) {
3347 rdma->verbs = verbs;
3348 } else if (rdma->verbs != verbs) {
3349 error_report("ibv context not matching %p, %p!", rdma->verbs,
3350 verbs);
3351 goto err_rdma_dest_wait;
3352 }
3353
3354 qemu_rdma_dump_id("dest_init", verbs);
3355
3356 ret = qemu_rdma_alloc_pd_cq(rdma);
3357 if (ret) {
3358 error_report("rdma migration: error allocating pd and cq!");
3359 goto err_rdma_dest_wait;
3360 }
3361
3362 ret = qemu_rdma_alloc_qp(rdma);
3363 if (ret) {
3364 error_report("rdma migration: error allocating qp!");
3365 goto err_rdma_dest_wait;
3366 }
3367
3368 ret = qemu_rdma_init_ram_blocks(rdma);
3369 if (ret) {
3370 error_report("rdma migration: error initializing ram blocks!");
3371 goto err_rdma_dest_wait;
3372 }
3373
3374 for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
3375 ret = qemu_rdma_reg_control(rdma, idx);
3376 if (ret) {
3377 error_report("rdma: error registering %d control", idx);
3378 goto err_rdma_dest_wait;
3379 }
3380 }
3381
3382 /* Accept the second connection request for return path */
3383 if (migrate_postcopy() && !rdma->is_return_path) {
3384 qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
3385 NULL,
3386 (void *)(intptr_t)rdma->return_path);
3387 } else {
3388 qemu_set_fd_handler(rdma->channel->fd, rdma_cm_poll_handler,
3389 NULL, rdma);
3390 }
3391
3392 ret = rdma_accept(rdma->cm_id, &conn_param);
3393 if (ret) {
3394 error_report("rdma_accept returns %d", ret);
3395 goto err_rdma_dest_wait;
3396 }
3397
3398 ret = rdma_get_cm_event(rdma->channel, &cm_event);
3399 if (ret) {
3400 error_report("rdma_accept get_cm_event failed %d", ret);
3401 goto err_rdma_dest_wait;
3402 }
3403
3404 if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
3405 error_report("rdma_accept: event not established");
3406 rdma_ack_cm_event(cm_event);
3407 goto err_rdma_dest_wait;
3408 }
3409
3410 rdma_ack_cm_event(cm_event);
3411 rdma->connected = true;
3412
3413 ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
3414 if (ret) {
3415 error_report("rdma migration: error posting second control recv");
3416 goto err_rdma_dest_wait;
3417 }
3418
3419 qemu_rdma_dump_gid("dest_connect", rdma->cm_id);
3420
3421 return 0;
3422
3423err_rdma_dest_wait:
3424 rdma->error_state = ret;
3425 qemu_rdma_cleanup(rdma);
3426 return ret;
3427}
3428
3429static int dest_ram_sort_func(const void *a, const void *b)
3430{
3431 unsigned int a_index = ((const RDMALocalBlock *)a)->src_index;
3432 unsigned int b_index = ((const RDMALocalBlock *)b)->src_index;
3433
3434 return (a_index < b_index) ? -1 : (a_index != b_index);
3435}
3436
3437/*
3438 * During each iteration of the migration, we listen for instructions
3439 * by the source VM to perform dynamic page registrations before they
3440 * can perform RDMA operations.
3441 *
3442 * We respond with the 'rkey'.
3443 *
3444 * Keep doing this until the source tells us to stop.
3445 */
3446static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
3447{
3448 RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
3449 .type = RDMA_CONTROL_REGISTER_RESULT,
3450 .repeat = 0,
3451 };
3452 RDMAControlHeader unreg_resp = { .len = 0,
3453 .type = RDMA_CONTROL_UNREGISTER_FINISHED,
3454 .repeat = 0,
3455 };
3456 RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
3457 .repeat = 1 };
3458 QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
3459 RDMAContext *rdma;
3460 RDMALocalBlocks *local;
3461 RDMAControlHeader head;
3462 RDMARegister *reg, *registers;
3463 RDMACompress *comp;
3464 RDMARegisterResult *reg_result;
3465 static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
3466 RDMALocalBlock *block;
3467 void *host_addr;
3468 int ret = 0;
3469 int idx = 0;
3470 int count = 0;
3471 int i = 0;
3472
3473 rcu_read_lock();
3474 rdma = atomic_rcu_read(&rioc->rdmain);
3475
3476 if (!rdma) {
3477 rcu_read_unlock();
3478 return -EIO;
3479 }
3480
3481 CHECK_ERROR_STATE();
3482
3483 local = &rdma->local_ram_blocks;
3484 do {
3485 trace_qemu_rdma_registration_handle_wait();
3486
3487 ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);
3488
3489 if (ret < 0) {
3490 break;
3491 }
3492
3493 if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
3494 error_report("rdma: Too many requests in this message (%d)."
3495 "Bailing.", head.repeat);
3496 ret = -EIO;
3497 break;
3498 }
3499
3500 switch (head.type) {
3501 case RDMA_CONTROL_COMPRESS:
3502 comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
3503 network_to_compress(comp);
3504
3505 trace_qemu_rdma_registration_handle_compress(comp->length,
3506 comp->block_idx,
3507 comp->offset);
3508 if (comp->block_idx >= rdma->local_ram_blocks.nb_blocks) {
3509 error_report("rdma: 'compress' bad block index %u (vs %d)",
3510 (unsigned int)comp->block_idx,
3511 rdma->local_ram_blocks.nb_blocks);
3512 ret = -EIO;
3513 goto out;
3514 }
3515 block = &(rdma->local_ram_blocks.block[comp->block_idx]);
3516
3517 host_addr = block->local_host_addr +
3518 (comp->offset - block->offset);
3519
3520 ram_handle_compressed(host_addr, comp->value, comp->length);
3521 break;
3522
3523 case RDMA_CONTROL_REGISTER_FINISHED:
3524 trace_qemu_rdma_registration_handle_finished();
3525 goto out;
3526
3527 case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
3528 trace_qemu_rdma_registration_handle_ram_blocks();
3529
3530 /* Sort our local RAM Block list so it's the same as the source,
3531 * we can do this since we've filled in a src_index in the list
3532 * as we received the RAMBlock list earlier.
3533 */
3534 qsort(rdma->local_ram_blocks.block,
3535 rdma->local_ram_blocks.nb_blocks,
3536 sizeof(RDMALocalBlock), dest_ram_sort_func);
3537 for (i = 0; i < local->nb_blocks; i++) {
3538 local->block[i].index = i;
3539 }
3540
3541 if (rdma->pin_all) {
3542 ret = qemu_rdma_reg_whole_ram_blocks(rdma);
3543 if (ret) {
3544 error_report("rdma migration: error dest "
3545 "registering ram blocks");
3546 goto out;
3547 }
3548 }
3549
3550 /*
3551 * Dest uses this to prepare to transmit the RAMBlock descriptions
3552 * to the source VM after connection setup.
3553 * Both sides use the "remote" structure to communicate and update
3554 * their "local" descriptions with what was sent.
3555 */
3556 for (i = 0; i < local->nb_blocks; i++) {
3557 rdma->dest_blocks[i].remote_host_addr =
3558 (uintptr_t)(local->block[i].local_host_addr);
3559
3560 if (rdma->pin_all) {
3561 rdma->dest_blocks[i].remote_rkey = local->block[i].mr->rkey;
3562 }
3563
3564 rdma->dest_blocks[i].offset = local->block[i].offset;
3565 rdma->dest_blocks[i].length = local->block[i].length;
3566
3567 dest_block_to_network(&rdma->dest_blocks[i]);
3568 trace_qemu_rdma_registration_handle_ram_blocks_loop(
3569 local->block[i].block_name,
3570 local->block[i].offset,
3571 local->block[i].length,
3572 local->block[i].local_host_addr,
3573 local->block[i].src_index);
3574 }
3575
3576 blocks.len = rdma->local_ram_blocks.nb_blocks
3577 * sizeof(RDMADestBlock);
3578
3579
3580 ret = qemu_rdma_post_send_control(rdma,
3581 (uint8_t *) rdma->dest_blocks, &blocks);
3582
3583 if (ret < 0) {
3584 error_report("rdma migration: error sending remote info");
3585 goto out;
3586 }
3587
3588 break;
3589 case RDMA_CONTROL_REGISTER_REQUEST:
3590 trace_qemu_rdma_registration_handle_register(head.repeat);
3591
3592 reg_resp.repeat = head.repeat;
3593 registers = (RDMARegister *) rdma->wr_data[idx].control_curr;
3594
3595 for (count = 0; count < head.repeat; count++) {
3596 uint64_t chunk;
3597 uint8_t *chunk_start, *chunk_end;
3598
3599 reg = &registers[count];
3600 network_to_register(reg);
3601
3602 reg_result = &results[count];
3603
3604 trace_qemu_rdma_registration_handle_register_loop(count,
3605 reg->current_index, reg->key.current_addr, reg->chunks);
3606
3607 if (reg->current_index >= rdma->local_ram_blocks.nb_blocks) {
3608 error_report("rdma: 'register' bad block index %u (vs %d)",
3609 (unsigned int)reg->current_index,
3610 rdma->local_ram_blocks.nb_blocks);
3611 ret = -ENOENT;
3612 goto out;
3613 }
3614 block = &(rdma->local_ram_blocks.block[reg->current_index]);
3615 if (block->is_ram_block) {
3616 if (block->offset > reg->key.current_addr) {
3617 error_report("rdma: bad register address for block %s"
3618 " offset: %" PRIx64 " current_addr: %" PRIx64,
3619 block->block_name, block->offset,
3620 reg->key.current_addr);
3621 ret = -ERANGE;
3622 goto out;
3623 }
3624 host_addr = (block->local_host_addr +
3625 (reg->key.current_addr - block->offset));
3626 chunk = ram_chunk_index(block->local_host_addr,
3627 (uint8_t *) host_addr);
3628 } else {
3629 chunk = reg->key.chunk;
3630 host_addr = block->local_host_addr +
3631 (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
3632 /* Check for particularly bad chunk value */
3633 if (host_addr < (void *)block->local_host_addr) {
3634 error_report("rdma: bad chunk for block %s"
3635 " chunk: %" PRIx64,
3636 block->block_name, reg->key.chunk);
3637 ret = -ERANGE;
3638 goto out;
3639 }
3640 }
3641 chunk_start = ram_chunk_start(block, chunk);
3642 chunk_end = ram_chunk_end(block, chunk + reg->chunks);
3643 /* avoid "-Waddress-of-packed-member" warning */
3644 uint32_t tmp_rkey = 0;
2da776db 3645 if (qemu_rdma_register_and_get_keys(rdma, block,
9589e763 3646 (uintptr_t)host_addr, NULL, &tmp_rkey,
2da776db 3647 chunk, chunk_start, chunk_end)) {
733252de 3648 error_report("cannot get rkey");
2da776db
MH
3649 ret = -EINVAL;
3650 goto out;
3651 }
9589e763 3652 reg_result->rkey = tmp_rkey;
2da776db 3653
fbce8c25 3654 reg_result->host_addr = (uintptr_t)block->local_host_addr;
2da776db 3655
733252de
DDAG
3656 trace_qemu_rdma_registration_handle_register_rkey(
3657 reg_result->rkey);
2da776db
MH
3658
3659 result_to_network(reg_result);
3660 }
3661
3662 ret = qemu_rdma_post_send_control(rdma,
3663 (uint8_t *) results, &reg_resp);
3664
3665 if (ret < 0) {
733252de 3666 error_report("Failed to send control buffer");
2da776db
MH
3667 goto out;
3668 }
3669 break;
        case RDMA_CONTROL_UNREGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_unregister(head.repeat);
            unreg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                reg = &registers[count];
                network_to_register(reg);

                trace_qemu_rdma_registration_handle_unregister_loop(count,
                           reg->current_index, reg->key.chunk);

                block = &(rdma->local_ram_blocks.block[reg->current_index]);

                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
                block->pmr[reg->key.chunk] = NULL;

                if (ret != 0) {
                    perror("rdma unregistration chunk failed");
                    ret = -ret;
                    goto out;
                }

                rdma->total_registrations--;

                trace_qemu_rdma_registration_handle_unregister_success(
                                                       reg->key.chunk);
            }

            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_REGISTER_RESULT:
            error_report("Invalid RESULT message at dest.");
            ret = -EIO;
            goto out;
        default:
            error_report("Unknown control message %s", control_desc(head.type));
            ret = -EIO;
            goto out;
        }
    } while (1);
out:
    if (ret < 0) {
        rdma->error_state = ret;
    }
    rcu_read_unlock();
    return ret;
}

/* Destination:
 * Called via a ram_control_load_hook during the initial RAM load section which
 * lists the RAMBlocks by name. This lets us know the order of the RAMBlocks
 * on the source.
 * We've already built our local RAMBlock list, but not yet sent the list to
 * the source.
 */
static int
rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name)
{
    RDMAContext *rdma;
    int curr;
    int found = -1;

    rcu_read_lock();
    rdma = atomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        rcu_read_unlock();
        return -EIO;
    }

    /* Find the matching RAMBlock in our local list */
    for (curr = 0; curr < rdma->local_ram_blocks.nb_blocks; curr++) {
        if (!strcmp(rdma->local_ram_blocks.block[curr].block_name, name)) {
            found = curr;
            break;
        }
    }

    if (found == -1) {
        error_report("RAMBlock '%s' not found on destination", name);
        rcu_read_unlock();
        return -ENOENT;
    }

    rdma->local_ram_blocks.block[curr].src_index = rdma->next_src_index;
    trace_rdma_block_notification_handle(name, rdma->next_src_index);
    rdma->next_src_index++;

    rcu_read_unlock();
    return 0;
}

static int rdma_load_hook(QEMUFile *f, void *opaque, uint64_t flags, void *data)
{
    switch (flags) {
    case RAM_CONTROL_BLOCK_REG:
        return rdma_block_notification_handle(opaque, data);

    case RAM_CONTROL_HOOK:
        return qemu_rdma_registration_handle(f, opaque);

    default:
        /* Shouldn't be called with any other values */
        abort();
    }
}

static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
                                        uint64_t flags, void *data)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;

    rcu_read_lock();
    rdma = atomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        rcu_read_unlock();
        return -EIO;
    }

    CHECK_ERROR_STATE();

    if (migration_in_postcopy()) {
        rcu_read_unlock();
        return 0;
    }

    trace_qemu_rdma_registration_start(flags);
    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
    qemu_fflush(f);

    rcu_read_unlock();
    return 0;
}

/*
 * Inform dest that dynamic registrations are done for now.
 * First, flush writes, if any.
 */
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
                                       uint64_t flags, void *data)
{
    Error *local_err = NULL, **errp = &local_err;
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;
    RDMAControlHeader head = { .len = 0, .repeat = 1 };
    int ret = 0;

    rcu_read_lock();
    rdma = atomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        rcu_read_unlock();
        return -EIO;
    }

    CHECK_ERROR_STATE();

    if (migration_in_postcopy()) {
        rcu_read_unlock();
        return 0;
    }

    qemu_fflush(f);
    ret = qemu_rdma_drain_cq(f, rdma);

    if (ret < 0) {
        goto err;
    }

    if (flags == RAM_CONTROL_SETUP) {
        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
        RDMALocalBlocks *local = &rdma->local_ram_blocks;
        int reg_result_idx, i, nb_dest_blocks;

        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
        trace_qemu_rdma_registration_stop_ram();

        /*
         * Make sure that we parallelize the pinning on both sides.
         * For very large guests, doing this serially takes a really
         * long time, so we have to 'interleave' the pinning locally
         * with the control messages by performing the pinning on this
         * side before we receive the control response from the other
         * side that the pinning has completed.
         */
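        /*
         * Concretely: with pin_all, qemu_rdma_reg_whole_ram_blocks is
         * passed as the callback of the exchange below, so the local
         * registration work runs after the request is posted but before
         * we block waiting for the destination's reply, overlapping with
         * the destination's own pinning.
         */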
        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
                    &reg_result_idx, rdma->pin_all ?
                    qemu_rdma_reg_whole_ram_blocks : NULL);
        if (ret < 0) {
            ERROR(errp, "receiving remote info!");
            rcu_read_unlock();
            return ret;
        }

        nb_dest_blocks = resp.len / sizeof(RDMADestBlock);

        /*
         * The protocol uses two different sets of rkeys (mutually exclusive):
         * 1. One key to represent the virtual address of the entire ram block.
         *    (dynamic chunk registration disabled - pin everything with one rkey.)
         * 2. One to represent individual chunks within a ram block.
         *    (dynamic chunk registration enabled - pin individual chunks.)
         *
         * Once the capability is successfully negotiated, the destination
         * transmits the keys to use (or sends them later) including the
         * virtual addresses and then propagates the remote ram block
         * descriptions to its local copy.
         */
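
        /*
         * In case 1, every RDMA_WRITE into a block reuses that block's
         * remote_rkey received here; in case 2, the source instead asks
         * for an rkey per chunk at write time via
         * RDMA_CONTROL_REGISTER_REQUEST (handled on the destination
         * above) and caches the result.
         */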
        if (local->nb_blocks != nb_dest_blocks) {
            ERROR(errp, "ram blocks mismatch (Number of blocks %d vs %d) "
                        "Your QEMU command line parameters are probably "
                        "not identical on both the source and destination.",
                        local->nb_blocks, nb_dest_blocks);
            rdma->error_state = -EINVAL;
            rcu_read_unlock();
            return -EINVAL;
        }

        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
        memcpy(rdma->dest_blocks,
            rdma->wr_data[reg_result_idx].control_curr, resp.len);
        for (i = 0; i < nb_dest_blocks; i++) {
            network_to_dest_block(&rdma->dest_blocks[i]);

            /* We require that the blocks are in the same order */
            if (rdma->dest_blocks[i].length != local->block[i].length) {
                ERROR(errp, "Block %s/%d has a different length %" PRIu64
                            " vs %" PRIu64, local->block[i].block_name, i,
                            local->block[i].length,
                            rdma->dest_blocks[i].length);
                rdma->error_state = -EINVAL;
                rcu_read_unlock();
                return -EINVAL;
            }
            local->block[i].remote_host_addr =
                    rdma->dest_blocks[i].remote_host_addr;
            local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
        }
    }

    trace_qemu_rdma_registration_stop(flags);

    head.type = RDMA_CONTROL_REGISTER_FINISHED;
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);

    if (ret < 0) {
        goto err;
    }

    rcu_read_unlock();
    return 0;
err:
    rdma->error_state = ret;
    rcu_read_unlock();
    return ret;
}

static const QEMUFileHooks rdma_read_hooks = {
    .hook_ram_load = rdma_load_hook,
};

static const QEMUFileHooks rdma_write_hooks = {
    .before_ram_iterate = qemu_rdma_registration_start,
    .after_ram_iterate = qemu_rdma_registration_stop,
    .save_page = qemu_rdma_save_page,
};


static void qio_channel_rdma_finalize(Object *obj)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(obj);
    if (rioc->rdmain) {
        qemu_rdma_cleanup(rioc->rdmain);
        g_free(rioc->rdmain);
        rioc->rdmain = NULL;
    }
    if (rioc->rdmaout) {
        qemu_rdma_cleanup(rioc->rdmaout);
        g_free(rioc->rdmaout);
        rioc->rdmaout = NULL;
    }
}

static void qio_channel_rdma_class_init(ObjectClass *klass,
                                        void *class_data G_GNUC_UNUSED)
{
    QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);

    ioc_klass->io_writev = qio_channel_rdma_writev;
    ioc_klass->io_readv = qio_channel_rdma_readv;
    ioc_klass->io_set_blocking = qio_channel_rdma_set_blocking;
    ioc_klass->io_close = qio_channel_rdma_close;
    ioc_klass->io_create_watch = qio_channel_rdma_create_watch;
    ioc_klass->io_set_aio_fd_handler = qio_channel_rdma_set_aio_fd_handler;
    ioc_klass->io_shutdown = qio_channel_rdma_shutdown;
}

static const TypeInfo qio_channel_rdma_info = {
    .parent = TYPE_QIO_CHANNEL,
    .name = TYPE_QIO_CHANNEL_RDMA,
    .instance_size = sizeof(QIOChannelRDMA),
    .instance_finalize = qio_channel_rdma_finalize,
    .class_init = qio_channel_rdma_class_init,
};

static void qio_channel_rdma_register_types(void)
{
    type_register_static(&qio_channel_rdma_info);
}

type_init(qio_channel_rdma_register_types);
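
/*
 * Wrap an RDMAContext in a QEMUFile: mode "wb" builds the source-side
 * (outgoing) channel and "rb" the destination-side (incoming) one, in
 * each case wiring the postcopy return path, if present, in the
 * opposite direction.
 */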
static QEMUFile *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
{
    QIOChannelRDMA *rioc;

    if (qemu_file_mode_is_not_valid(mode)) {
        return NULL;
    }

    rioc = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA));

    if (mode[0] == 'w') {
        rioc->file = qemu_fopen_channel_output(QIO_CHANNEL(rioc));
        rioc->rdmaout = rdma;
        rioc->rdmain = rdma->return_path;
        qemu_file_set_hooks(rioc->file, &rdma_write_hooks);
    } else {
        rioc->file = qemu_fopen_channel_input(QIO_CHANNEL(rioc));
        rioc->rdmain = rdma;
        rioc->rdmaout = rdma->return_path;
        qemu_file_set_hooks(rioc->file, &rdma_read_hooks);
    }

    return rioc->file;
}

static void rdma_accept_incoming_migration(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    QEMUFile *f;
    Error *local_err = NULL, **errp = &local_err;

    trace_qemu_rdma_accept_incoming_migration();
    ret = qemu_rdma_accept(rdma);

    if (ret) {
        ERROR(errp, "RDMA Migration initialization failed!");
        return;
    }

    trace_qemu_rdma_accept_incoming_migration_accepted();

    if (rdma->is_return_path) {
        return;
    }

    f = qemu_fopen_rdma(rdma, "rb");
    if (f == NULL) {
        ERROR(errp, "could not qemu_fopen_rdma!");
        qemu_rdma_cleanup(rdma);
        return;
    }

    rdma->migration_started_on_destination = 1;
    migration_fd_process_incoming(f);
}
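
/*
 * Incoming setup, in order: resolve and bind via qemu_rdma_data_init()
 * and qemu_rdma_dest_init(), listen on the rdma_cm id, optionally
 * prepare a second context for the postcopy return path, then let
 * rdma_accept_incoming_migration() above accept the connection when
 * the event channel's fd becomes readable.
 */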
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
    RDMAContext *rdma, *rdma_return_path = NULL;
    Error *local_err = NULL;

    trace_rdma_start_incoming_migration();
    rdma = qemu_rdma_data_init(host_port, &local_err);

    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_dest_init(rdma, &local_err);

    if (ret) {
        goto err;
    }

    trace_rdma_start_incoming_migration_after_dest_init();

    ret = rdma_listen(rdma->listen_id, 5);

    if (ret) {
        ERROR(errp, "listening on socket!");
        goto err;
    }

    trace_rdma_start_incoming_migration_after_rdma_listen();

    /* initialize the RDMAContext for return path */
    if (migrate_postcopy()) {
        rdma_return_path = qemu_rdma_data_init(host_port, &local_err);

        if (rdma_return_path == NULL) {
            goto err;
        }

        qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
    }

    qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                        NULL, (void *)(intptr_t)rdma);
    return;
err:
    error_propagate(errp, local_err);
    g_free(rdma);
    g_free(rdma_return_path);
}
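
/*
 * Outgoing setup mirrors the incoming side: init and connect the main
 * channel, optionally init and connect a second queue pair for the
 * postcopy return path, then hand the "wb" QEMUFile to the migration
 * core via migrate_fd_connect().
 */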
void rdma_start_outgoing_migration(void *opaque,
                            const char *host_port, Error **errp)
{
    MigrationState *s = opaque;
    RDMAContext *rdma = qemu_rdma_data_init(host_port, errp);
    RDMAContext *rdma_return_path = NULL;
    int ret = 0;

    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_source_init(rdma,
        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);

    if (ret) {
        goto err;
    }

    trace_rdma_start_outgoing_migration_after_rdma_source_init();
    ret = qemu_rdma_connect(rdma, errp);

    if (ret) {
        goto err;
    }

    /* RDMA postcopy needs a separate queue pair for the return path */
    if (migrate_postcopy()) {
        rdma_return_path = qemu_rdma_data_init(host_port, errp);

        if (rdma_return_path == NULL) {
            goto err;
        }

        ret = qemu_rdma_source_init(rdma_return_path,
            s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);

        if (ret) {
            goto err;
        }

        ret = qemu_rdma_connect(rdma_return_path, errp);

        if (ret) {
            goto err;
        }

        rdma->return_path = rdma_return_path;
        rdma_return_path->return_path = rdma;
        rdma_return_path->is_return_path = true;
    }

    trace_rdma_start_outgoing_migration_after_rdma_connect();

    s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
    migrate_fd_connect(s, NULL);
    return;
err:
    g_free(rdma);
    g_free(rdma_return_path);
}