/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "exec/cpu-common.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "block/coroutine.h"
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <string.h>
#include <rdma/rdma_cma.h>

//#define DEBUG_RDMA
//#define DEBUG_RDMA_VERBOSE
//#define DEBUG_RDMA_REALLY_VERBOSE

#ifdef DEBUG_RDMA
#define DPRINTF(fmt, ...) \
    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_RDMA_VERBOSE
#define DDPRINTF(fmt, ...) \
    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DDPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_RDMA_REALLY_VERBOSE
#define DDDPRINTF(fmt, ...) \
    do { printf("rdma: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DDDPRINTF(fmt, ...) \
    do { } while (0)
#endif

/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)
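/* e.g. (2 * 1024 * 1024) / 4096 = 512; this value also sizes the send queue below */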
74 | ||
75 | #define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */ | |
76 | ||
/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size infiniband SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;

#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                fprintf(stderr, "RDMA is in an error state waiting for" \
                                " migration to abort!\n"); \
                rdma->error_reported = 1; \
            } \
            return rdma->error_state; \
        } \
    } while (0);

/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL
130 | ||
131 | #define RDMA_WRID_TYPE_MASK \ | |
132 | ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL) | |
133 | ||
134 | #define RDMA_WRID_BLOCK_MASK \ | |
135 | (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL)) | |
136 | ||
137 | #define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK) | |
138 | ||
/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};
150 | ||
151 | const char *wrid_desc[] = { | |
152 | [RDMA_WRID_NONE] = "NONE", | |
153 | [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA", | |
154 | [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND", | |
155 | [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV", | |
156 | }; | |
157 | ||
/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands).
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};

/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};
189 | ||
190 | const char *control_desc[] = { | |
191 | [RDMA_CONTROL_NONE] = "NONE", | |
192 | [RDMA_CONTROL_ERROR] = "ERROR", | |
193 | [RDMA_CONTROL_READY] = "READY", | |
194 | [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE", | |
195 | [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST", | |
196 | [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT", | |
197 | [RDMA_CONTROL_COMPRESS] = "COMPRESS", | |
198 | [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST", | |
199 | [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT", | |
200 | [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED", | |
201 | [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST", | |
202 | [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED", | |
203 | }; | |
204 | ||
/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;

/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
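
/*
 * Example: a source requesting pinning of all memory would prepare
 * { .version = RDMA_CONTROL_VERSION_CURRENT, .flags = RDMA_CAPABILITY_PIN_ALL }
 * and call caps_to_network() on it before transmission; the receiver applies
 * network_to_caps() and can then compare the flags against known_capabilities.
 */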
235 | ||
236 | /* | |
237 | * Representation of a RAMBlock from an RDMA perspective. | |
238 | * This is not transmitted, only local. | |
239 | * This and subsequent structures cannot be linked lists | |
240 | * because we're using a single IB message to transmit | |
241 | * the information. It's small anyway, so a list is overkill. | |
242 | */ | |
243 | typedef struct RDMALocalBlock { | |
244 | uint8_t *local_host_addr; /* local virtual address */ | |
245 | uint64_t remote_host_addr; /* remote virtual address */ | |
246 | uint64_t offset; | |
247 | uint64_t length; | |
248 | struct ibv_mr **pmr; /* MRs for chunk-level registration */ | |
249 | struct ibv_mr *mr; /* MR for non-chunk-level registration */ | |
250 | uint32_t *remote_keys; /* rkeys for chunk-level registration */ | |
251 | uint32_t remote_rkey; /* rkeys for non-chunk-level registration */ | |
252 | int index; /* which block are we */ | |
253 | bool is_ram_block; | |
254 | int nb_chunks; | |
255 | unsigned long *transit_bitmap; | |
256 | unsigned long *unregister_bitmap; | |
257 | } RDMALocalBlock; | |
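
/*
 * Note: 'mr' and 'pmr' are alternatives: qemu_rdma_reg_whole_ram_blocks()
 * fills in 'mr' when the whole block is pinned up front, while
 * qemu_rdma_register_and_get_keys() fills 'pmr' one chunk at a time and
 * short-circuits whenever 'mr' already exists.
 */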
258 | ||
259 | /* | |
260 | * Also represents a RAMblock, but only on the dest. | |
261 | * This gets transmitted by the dest during connection-time | |
262 | * to the source VM and then is used to populate the | |
263 | * corresponding RDMALocalBlock with | |
264 | * the information needed to perform the actual RDMA. | |
265 | */ | |
266 | typedef struct QEMU_PACKED RDMARemoteBlock { | |
267 | uint64_t remote_host_addr; | |
268 | uint64_t offset; | |
269 | uint64_t length; | |
270 | uint32_t remote_rkey; | |
271 | uint32_t padding; | |
272 | } RDMARemoteBlock; | |
273 | ||
274 | static uint64_t htonll(uint64_t v) | |
275 | { | |
276 | union { uint32_t lv[2]; uint64_t llv; } u; | |
277 | u.lv[0] = htonl(v >> 32); | |
278 | u.lv[1] = htonl(v & 0xFFFFFFFFULL); | |
279 | return u.llv; | |
280 | } | |
281 | ||
282 | static uint64_t ntohll(uint64_t v) { | |
283 | union { uint32_t lv[2]; uint64_t llv; } u; | |
284 | u.llv = v; | |
285 | return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]); | |
286 | } | |
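
/*
 * Example: on a little-endian host, htonll(0x0102030405060708ULL) produces
 * bytes 01 02 03 04 05 06 07 08 on the wire (network byte order), and
 * ntohll() performs the inverse, so ntohll(htonll(x)) == x on any host.
 */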
287 | ||
288 | static void remote_block_to_network(RDMARemoteBlock *rb) | |
289 | { | |
290 | rb->remote_host_addr = htonll(rb->remote_host_addr); | |
291 | rb->offset = htonll(rb->offset); | |
292 | rb->length = htonll(rb->length); | |
293 | rb->remote_rkey = htonl(rb->remote_rkey); | |
294 | } | |
295 | ||
296 | static void network_to_remote_block(RDMARemoteBlock *rb) | |
297 | { | |
298 | rb->remote_host_addr = ntohll(rb->remote_host_addr); | |
299 | rb->offset = ntohll(rb->offset); | |
300 | rb->length = ntohll(rb->length); | |
301 | rb->remote_rkey = ntohl(rb->remote_rkey); | |
302 | } | |
303 | ||
/*
 * Virtual address of the above structures used for transmitting
 * the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool init;                 /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;
314 | ||
315 | /* | |
316 | * Main data structure for RDMA state. | |
317 | * While there is only one copy of this structure being allocated right now, | |
318 | * this is the place where one would start if you wanted to consider | |
319 | * having more than one RDMA connection open at the same time. | |
320 | */ | |
321 | typedef struct RDMAContext { | |
322 | char *host; | |
323 | int port; | |
324 | ||
1f22364b | 325 | RDMAWorkRequestData wr_data[RDMA_WRID_MAX]; |
2da776db MH |
326 | |
327 | /* | |
328 | * This is used by *_exchange_send() to figure out whether or not | |
329 | * the initial "READY" message has already been received or not. | |
330 | * This is because other functions may potentially poll() and detect | |
331 | * the READY message before send() does, in which case we need to | |
332 | * know if it completed. | |
333 | */ | |
334 | int control_ready_expected; | |
335 | ||
336 | /* number of outstanding writes */ | |
337 | int nb_sent; | |
338 | ||
339 | /* store info about current buffer so that we can | |
340 | merge it with future sends */ | |
341 | uint64_t current_addr; | |
342 | uint64_t current_length; | |
343 | /* index of ram block the current buffer belongs to */ | |
344 | int current_index; | |
345 | /* index of the chunk in the current ram block */ | |
346 | int current_chunk; | |
347 | ||
348 | bool pin_all; | |
349 | ||
350 | /* | |
351 | * infiniband-specific variables for opening the device | |
352 | * and maintaining connection state and so forth. | |
353 | * | |
354 | * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in | |
355 | * cm_id->verbs, cm_id->channel, and cm_id->qp. | |
356 | */ | |
357 | struct rdma_cm_id *cm_id; /* connection manager ID */ | |
358 | struct rdma_cm_id *listen_id; | |
5a91337c | 359 | bool connected; |
2da776db MH |
360 | |
361 | struct ibv_context *verbs; | |
362 | struct rdma_event_channel *channel; | |
363 | struct ibv_qp *qp; /* queue pair */ | |
364 | struct ibv_comp_channel *comp_channel; /* completion channel */ | |
365 | struct ibv_pd *pd; /* protection domain */ | |
366 | struct ibv_cq *cq; /* completion queue */ | |
367 | ||
    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    int error_reported;
375 | ||
376 | /* | |
377 | * Description of ram blocks used throughout the code. | |
378 | */ | |
379 | RDMALocalBlocks local_ram_blocks; | |
380 | RDMARemoteBlock *block; | |
381 | ||
382 | /* | |
383 | * Migration on *destination* started. | |
384 | * Then use coroutine yield function. | |
385 | * Source runs in a thread, so we don't care. | |
386 | */ | |
387 | int migration_started_on_destination; | |
388 | ||
389 | int total_registrations; | |
390 | int total_writes; | |
391 | ||
392 | int unregister_current, unregister_next; | |
393 | uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX]; | |
394 | ||
395 | GHashTable *blockmap; | |
396 | } RDMAContext; | |
397 | ||
398 | /* | |
399 | * Interface to the rest of the migration call stack. | |
400 | */ | |
401 | typedef struct QEMUFileRDMA { | |
402 | RDMAContext *rdma; | |
403 | size_t len; | |
404 | void *file; | |
405 | } QEMUFileRDMA; | |
406 | ||
/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;

static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
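
/*
 * On the wire this is a 16-byte packed header. For example, a READY
 * message with no payload would go out as { .len = 0,
 * .type = RDMA_CONTROL_READY, .repeat = 1 } after control_to_network()
 * has converted len, type and repeat to network byte order with htonl().
 */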
431 | ||
432 | /* | |
433 | * Register a single Chunk. | |
434 | * Information sent by the source VM to inform the dest | |
435 | * to register an single chunk of memory before we can perform | |
436 | * the actual RDMA operation. | |
437 | */ | |
438 | typedef struct QEMU_PACKED { | |
439 | union QEMU_PACKED { | |
440 | uint64_t current_addr; /* offset into the ramblock of the chunk */ | |
441 | uint64_t chunk; /* chunk to lookup if unregistering */ | |
442 | } key; | |
443 | uint32_t current_index; /* which ramblock the chunk belongs to */ | |
444 | uint32_t padding; | |
445 | uint64_t chunks; /* how many sequential chunks to register */ | |
446 | } RDMARegister; | |
447 | ||
448 | static void register_to_network(RDMARegister *reg) | |
449 | { | |
450 | reg->key.current_addr = htonll(reg->key.current_addr); | |
451 | reg->current_index = htonl(reg->current_index); | |
452 | reg->chunks = htonll(reg->chunks); | |
453 | } | |
454 | ||
455 | static void network_to_register(RDMARegister *reg) | |
456 | { | |
457 | reg->key.current_addr = ntohll(reg->key.current_addr); | |
458 | reg->current_index = ntohl(reg->current_index); | |
459 | reg->chunks = ntohll(reg->chunks); | |
460 | } | |
461 | ||
462 | typedef struct QEMU_PACKED { | |
463 | uint32_t value; /* if zero, we will madvise() */ | |
464 | uint32_t block_idx; /* which ram block index */ | |
465 | uint64_t offset; /* where in the remote ramblock this chunk */ | |
466 | uint64_t length; /* length of the chunk */ | |
467 | } RDMACompress; | |
468 | ||
469 | static void compress_to_network(RDMACompress *comp) | |
470 | { | |
471 | comp->value = htonl(comp->value); | |
472 | comp->block_idx = htonl(comp->block_idx); | |
473 | comp->offset = htonll(comp->offset); | |
474 | comp->length = htonll(comp->length); | |
475 | } | |
476 | ||
477 | static void network_to_compress(RDMACompress *comp) | |
478 | { | |
479 | comp->value = ntohl(comp->value); | |
480 | comp->block_idx = ntohl(comp->block_idx); | |
481 | comp->offset = ntohll(comp->offset); | |
482 | comp->length = ntohll(comp->length); | |
483 | } | |
484 | ||
485 | /* | |
486 | * The result of the dest's memory registration produces an "rkey" | |
487 | * which the source VM must reference in order to perform | |
488 | * the RDMA operation. | |
489 | */ | |
490 | typedef struct QEMU_PACKED { | |
491 | uint32_t rkey; | |
492 | uint32_t padding; | |
493 | uint64_t host_addr; | |
494 | } RDMARegisterResult; | |
495 | ||
496 | static void result_to_network(RDMARegisterResult *result) | |
497 | { | |
498 | result->rkey = htonl(result->rkey); | |
499 | result->host_addr = htonll(result->host_addr); | |
500 | }; | |
501 | ||
502 | static void network_to_result(RDMARegisterResult *result) | |
503 | { | |
504 | result->rkey = ntohl(result->rkey); | |
505 | result->host_addr = ntohll(result->host_addr); | |
506 | }; | |
507 | ||
508 | const char *print_wrid(int wrid); | |
509 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, | |
510 | uint8_t *data, RDMAControlHeader *resp, | |
511 | int *resp_idx, | |
512 | int (*callback)(RDMAContext *rdma)); | |
513 | ||

static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}

static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *) (((uintptr_t) rdma_ram_block->local_host_addr)
                                    + (i << RDMA_REG_CHUNK_SHIFT));
}

static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}

static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr,
                                 ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    RDMALocalBlock *old = local->block;

    assert(block == NULL);

    local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1));

    if (local->nb_blocks) {
        int x;

        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
            g_hash_table_insert(rdma->blockmap, (void *)old[x].offset,
                                &local->block[x]);
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t));

    block->is_ram_block = local->init ? false : true;

    g_hash_table_insert(rdma->blockmap, (void *) block_offset, block);

    DDPRINTF("Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
           local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
           block->length, (uint64_t) (block->local_host_addr + block->length),
           BITS_TO_LONGS(block->nb_chunks) *
               sizeof(unsigned long) * 8, block->nb_chunks);

    local->nb_blocks++;

    return 0;
}

/*
 * Memory regions need to be registered with the device and queue pairs set up
 * in advance before the migration starts. This tells us where the RAM blocks
 * are so that we can register them individually.
 */
static void qemu_rdma_init_one_block(void *host_addr,
    ram_addr_t block_offset, ram_addr_t length, void *opaque)
{
    __qemu_rdma_add_block(opaque, host_addr, block_offset, length);
}

/*
 * Identify the RAMBlocks and their quantity. They will be referenced to
 * identify chunk boundaries inside each RAMBlock and also be referenced
 * during dynamic page registration.
 */
static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    assert(rdma->blockmap == NULL);
    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
    memset(local, 0, sizeof *local);
    qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma);
    DPRINTF("Allocated %d local ram block structures\n", local->nb_blocks);
    rdma->block = (RDMARemoteBlock *) g_malloc0(sizeof(RDMARemoteBlock) *
                    rdma->local_ram_blocks.nb_blocks);
    local->init = true;
    return 0;
}

static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    RDMALocalBlock *old = local->block;
    int x;

    assert(block);

    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    for (x = 0; x < local->nb_blocks; x++) {
        g_hash_table_remove(rdma->blockmap, (void *)old[x].offset);
    }

    if (local->nb_blocks > 1) {

        local->block = g_malloc0(sizeof(RDMALocalBlock) *
                                    (local->nb_blocks - 1));

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                sizeof(RDMALocalBlock) *
                    (local->nb_blocks - (block->index + 1)));
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    DDPRINTF("Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64
           " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n",
           local->nb_blocks, (uint64_t) block->local_host_addr, block->offset,
           block->length, (uint64_t) (block->local_host_addr + block->length),
           BITS_TO_LONGS(block->nb_chunks) *
               sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset,
                                &local->block[x]);
        }
    }

    return 0;
}

/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        fprintf(stderr, "FAILED TO QUERY PORT INFORMATION!\n");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}

/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    DPRINTF("%s Source GID: %s, Dest GID: %s\n", who, sgid, dgid);
}

/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 * If the source VM connects with an IPv4 address without knowing that the
 * destination has bound to '[::]' the migration will unconditionally fail
 * unless the management software is explicitly listening on the IPv4
 * address while using a RoCE-based device.
 *
 * If the source VM connects with an IPv6 address, then we're OK because we can
 * throw an error on the source (and similarly on the destination).
 *
 * But in mixed environments, this will be broken for a while until it is fixed
 * inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs)
{
    struct ibv_port_attr port_attr;

    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there are any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);
        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that something other than '[::]'
     * was used by the management software for binding, in which case we can
     * actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}

/*
 * Figure out which RDMA device corresponds to the requested IP hostname
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        DPRINTF("Trying %s => %s\n", rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}

/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        fprintf(stderr, "failed to allocate protection domain\n");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        fprintf(stderr, "failed to allocate completion channel\n");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
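    /* e.g. with RDMA_SIGNALED_SEND_MAX = 512, that is 512 * 3 = 1536 CQ entries */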
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        fprintf(stderr, "failed to allocate completion queue\n");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;
}

/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->cq;
    attr.recv_cq = rdma->cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}

static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                    local->block[i].local_host_addr,
                    local->block[i].length,
                    IBV_ACCESS_LOCAL_WRITE |
                    IBV_ACCESS_REMOTE_WRITE
                    );
        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        rdma->total_registrations--;
    }

    return -1;
}

/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block that the page belongs to.
 *
 * This search cannot fail or the migration will fail.
 */
static int qemu_rdma_search_ram_block(RDMAContext *rdma,
                                      uint64_t block_offset,
                                      uint64_t offset,
                                      uint64_t length,
                                      uint64_t *block_index,
                                      uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));

    return 0;
}

/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
        RDMALocalBlock *block, uint8_t *host_addr,
        uint32_t *lkey, uint32_t *rkey, int chunk,
        uint8_t *chunk_start, uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
        if (!block->pmr) {
            return -1;
        }
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;

        DDPRINTF("Registering %" PRIu64 " bytes @ %p\n",
                 len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
                chunk_start, len,
                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE) : 0));

        if (!block->pmr[chunk]) {
            perror("Failed to register chunk!");
            fprintf(stderr, "Chunk details: block: %d chunk index %d"
                            " start %" PRIu64 " end %" PRIu64 " host %" PRIu64
                            " local %" PRIu64 " registrations: %d\n",
                            block->index, chunk, (uint64_t) chunk_start,
                            (uint64_t) chunk_end, (uint64_t) host_addr,
                            (uint64_t) block->local_host_addr,
                            rdma->total_registrations);
            return -1;
        }
        rdma->total_registrations++;
    }

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}

/*
 * Register (at connection time) the memory used for control
 * channel messages.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    fprintf(stderr, "qemu_rdma_reg_control failed!\n");
    return -1;
}

const char *print_wrid(int wrid)
{
    if (wrid >= RDMA_WRID_RECV_CONTROL) {
        return wrid_desc[RDMA_WRID_RECV_CONTROL];
    }
    return wrid_desc[wrid];
}

/*
 * RDMA requires memory registration (mlock/pinning), but this is not good for
 * overcommitment.
 *
 * In preparation for the future where LRU information or workload-specific
 * writable working set memory access behavior is available to QEMU
 * it would be nice to have in place the ability to UN-register/UN-pin
 * particular memory regions from the RDMA hardware when it is determined that
 * those regions of memory will likely not be accessed again in the near
 * future.
 *
 * While we do not yet have such information right now, the following
 * compile-time option allows us to perform a non-optimized version of this
 * behavior.
 *
 * By uncommenting this option, you will cause *all* RDMA transfers to be
 * unregistered immediately after the transfer completes on both sides of the
 * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
 *
 * This will have a terrible impact on migration performance, so until future
 * workload information or LRU information is available, do not attempt to use
 * this feature except for basic testing.
 */
//#define RDMA_UNREGISTRATION_EXAMPLE

/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
 *      - for bit clearing
 *      - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        DDPRINTF("Processing unregister for chunk: %" PRIu64
                 " at position %d\n", chunk, rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }


        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's infiniband message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */
        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            DDPRINTF("Cannot unregister inflight chunk: %" PRIu64 "\n", chunk);
            continue;
        }

        DDPRINTF("Sending unregister for chunk: %" PRIu64 "\n", chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(&reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        DDPRINTF("Unregister for chunk: %" PRIu64 " complete.\n", chunk);
    }

    return 0;
}

static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}

/*
 * Set bit for unregistration in the next iteration.
 * We cannot transmit right here, but will unpin later.
 */
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
                                        uint64_t chunk, uint64_t wr_id)
{
    if (rdma->unregistrations[rdma->unregister_next] != 0) {
        fprintf(stderr, "rdma migration: queue is full!\n");
    } else {
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
            DDPRINTF("Appending unregister chunk %" PRIu64
                    " at position %d\n", chunk, rdma->unregister_next);

            rdma->unregistrations[rdma->unregister_next++] =
                    qemu_rdma_make_wrid(wr_id, index, chunk);

            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
                rdma->unregister_next = 0;
            }
        } else {
            DDPRINTF("Unregister chunk %" PRIu64 " already in queue.\n",
                    chunk);
        }
    }
}

/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * Return the work request ID that completed.
 */
static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
                               uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(rdma->cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        fprintf(stderr, "ibv_poll_cq return %d!\n", ret);
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        DDDPRINTF("completion %s #%" PRId64 " received (%" PRId64 ")"
                  " left %d\n", wrid_desc[RDMA_WRID_RECV_CONTROL],
                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        DDDPRINTF("completions %s (%" PRId64 ") left %d, "
                  "block %" PRIu64 ", chunk: %" PRIu64 " %p %p\n",
                  print_wrid(wr_id), wr_id, rdma->nb_sent, index, chunk,
                  block->local_host_addr, (void *)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }

        if (!rdma->pin_all) {
            /*
             * FYI: If one wanted to signal a specific chunk to be unregistered
             * using LRU or workload-specific information, this is the function
             * you would call to do so. That chunk would then get asynchronously
             * unregistered later.
             */
#ifdef RDMA_UNREGISTRATION_EXAMPLE
            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
#endif
        }
    } else {
        DDDPRINTF("other completion %s (%" PRId64 ") received left %d\n",
            print_wrid(wr_id), wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}

1448 | /* | |
1449 | * Block until the next work request has completed. | |
1450 | * | |
1451 | * First poll to see if a work request has already completed, | |
1452 | * otherwise block. | |
1453 | * | |
1454 | * If we encounter completed work requests for IDs other than | |
1455 | * the one we're interested in, then that's generally an error. | |
1456 | * | |
1457 | * The only exception is actual RDMA Write completions. These | |
1458 | * completions only need to be recorded, but do not actually | |
1459 | * need further processing. | |
1460 | */ | |
88571882 IY |
1461 | static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, |
1462 | uint32_t *byte_len) | |
2da776db MH |
1463 | { |
1464 | int num_cq_events = 0, ret = 0; | |
1465 | struct ibv_cq *cq; | |
1466 | void *cq_ctx; | |
1467 | uint64_t wr_id = RDMA_WRID_NONE, wr_id_in; | |
1468 | ||
1469 | if (ibv_req_notify_cq(rdma->cq, 0)) { | |
1470 | return -1; | |
1471 | } | |
1472 | /* poll cq first */ | |
1473 | while (wr_id != wrid_requested) { | |
88571882 | 1474 | ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); |
2da776db MH |
1475 | if (ret < 0) { |
1476 | return ret; | |
1477 | } | |
1478 | ||
1479 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
1480 | ||
1481 | if (wr_id == RDMA_WRID_NONE) { | |
1482 | break; | |
1483 | } | |
1484 | if (wr_id != wrid_requested) { | |
1485 | DDDPRINTF("A Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n", | |
1486 | print_wrid(wrid_requested), | |
1487 | wrid_requested, print_wrid(wr_id), wr_id); | |
1488 | } | |
1489 | } | |
1490 | ||
1491 | if (wr_id == wrid_requested) { | |
1492 | return 0; | |
1493 | } | |
1494 | ||
1495 | while (1) { | |
1496 | /* | |
1497 | * Coroutine doesn't start until process_incoming_migration() | |
1498 | * so don't yield unless we know we're running inside of a coroutine. | |
1499 | */ | |
1500 | if (rdma->migration_started_on_destination) { | |
1501 | yield_until_fd_readable(rdma->comp_channel->fd); | |
1502 | } | |
1503 | ||
1504 | if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) { | |
1505 | perror("ibv_get_cq_event"); | |
1506 | goto err_block_for_wrid; | |
1507 | } | |
1508 | ||
1509 | num_cq_events++; | |
1510 | ||
1511 | if (ibv_req_notify_cq(cq, 0)) { | |
1512 | goto err_block_for_wrid; | |
1513 | } | |
1514 | ||
1515 | while (wr_id != wrid_requested) { | |
88571882 | 1516 | ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); |
2da776db MH |
1517 | if (ret < 0) { |
1518 | goto err_block_for_wrid; | |
1519 | } | |
1520 | ||
1521 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
1522 | ||
1523 | if (wr_id == RDMA_WRID_NONE) { | |
1524 | break; | |
1525 | } | |
1526 | if (wr_id != wrid_requested) { | |
1527 | DDDPRINTF("B Wanted wrid %s (%d) but got %s (%" PRIu64 ")\n", | |
1528 | print_wrid(wrid_requested), wrid_requested, | |
1529 | print_wrid(wr_id), wr_id); | |
1530 | } | |
1531 | } | |
1532 | ||
1533 | if (wr_id == wrid_requested) { | |
1534 | goto success_block_for_wrid; | |
1535 | } | |
1536 | } | |
1537 | ||
1538 | success_block_for_wrid: | |
1539 | if (num_cq_events) { | |
1540 | ibv_ack_cq_events(cq, num_cq_events); | |
1541 | } | |
1542 | return 0; | |
1543 | ||
1544 | err_block_for_wrid: | |
1545 | if (num_cq_events) { | |
1546 | ibv_ack_cq_events(cq, num_cq_events); | |
1547 | } | |
1548 | return ret; | |
1549 | } | |
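/*
 * The function above is the standard ibverbs poll-then-block idiom:
 * arm the completion channel, drain the CQ without blocking, and only
 * sleep on the channel fd once the CQ is empty. A condensed sketch of
 * the same idiom (hypothetical standalone helper, not used by this
 * file; re-arming before re-polling is what avoids lost completions):
 */
#if 0
static int example_wait_for_completion(struct ibv_comp_channel *ch,
                                       struct ibv_cq *cq)
{
    struct ibv_wc wc;
    void *ev_ctx;

    /* Arm notification *before* polling, or a completion can be missed. */
    if (ibv_req_notify_cq(cq, 0)) {
        return -1;
    }

    while (1) {
        int n = ibv_poll_cq(cq, 1, &wc);    /* non-blocking drain */
        if (n < 0) {
            return -1;
        }
        if (n == 1) {
            return (wc.status == IBV_WC_SUCCESS) ? 0 : -1;
        }

        /* CQ empty: block on the channel, then ack, re-arm and re-poll. */
        if (ibv_get_cq_event(ch, &cq, &ev_ctx)) {
            return -1;
        }
        ibv_ack_cq_events(cq, 1);
        if (ibv_req_notify_cq(cq, 0)) {
            return -1;
        }
    }
}
#endif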
1550 | ||
1551 | /* | |
1552 | * Post a SEND message work request for the control channel | |
1553 | * containing some data and block until the post completes. | |
1554 | */ | |
1555 | static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf, | |
1556 | RDMAControlHeader *head) | |
1557 | { | |
1558 | int ret = 0; | |
1f22364b | 1559 | RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL]; |
1560 | struct ibv_send_wr *bad_wr; |
1561 | struct ibv_sge sge = { | |
1562 | .addr = (uint64_t)(wr->control), | |
1563 | .length = head->len + sizeof(RDMAControlHeader), | |
1564 | .lkey = wr->control_mr->lkey, | |
1565 | }; | |
1566 | struct ibv_send_wr send_wr = { | |
1567 | .wr_id = RDMA_WRID_SEND_CONTROL, | |
1568 | .opcode = IBV_WR_SEND, | |
1569 | .send_flags = IBV_SEND_SIGNALED, | |
1570 | .sg_list = &sge, | |
1571 | .num_sge = 1, | |
1572 | }; | |
1573 | ||
1574 | DDDPRINTF("CONTROL: sending %s..\n", control_desc[head->type]); | |
1575 | ||
1576 | /* | |
1577 | * We wouldn't actually need to do a memcpy() in here if we set up | |
1578 | * the "sge" properly, but since we're only sending control messages | |
1579 | * (not RAM in a performance-critical path), it's OK for now. | |
1580 | * | |
1581 | * The copy makes the RDMAControlHeader simpler to manipulate | |
1582 | * for the time being. | |
1583 | */ | |
6f1484ed | 1584 | assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head)); |
1585 | memcpy(wr->control, head, sizeof(RDMAControlHeader)); |
1586 | control_to_network((void *) wr->control); | |
1587 | ||
1588 | if (buf) { | |
1589 | memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len); | |
1590 | } | |
1591 | ||
1592 | ||
1593 | ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); | |
1594 | ||
1595 | if (ret) { | |
1596 | /* ibv_post_send() returns a positive errno value on failure. */ | |
1597 | fprintf(stderr, "Failed to post IB SEND for control!\n"); | |
1598 | return -ret; | |
1599 | } | |
1601 | ||
88571882 | 1602 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL); |
1603 | if (ret < 0) { |
1604 | fprintf(stderr, "rdma migration: send polling control error!\n"); | |
1605 | } | |
1606 | ||
1607 | return ret; | |
1608 | } | |
1609 | ||
1610 | /* | |
1611 | * Post a RECV work request in anticipation of some future receipt | |
1612 | * of data on the control channel. | |
1613 | */ | |
1614 | static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx) | |
1615 | { | |
1616 | struct ibv_recv_wr *bad_wr; | |
1617 | struct ibv_sge sge = { | |
1618 | .addr = (uint64_t)(rdma->wr_data[idx].control), | |
1619 | .length = RDMA_CONTROL_MAX_BUFFER, | |
1620 | .lkey = rdma->wr_data[idx].control_mr->lkey, | |
1621 | }; | |
1622 | ||
1623 | struct ibv_recv_wr recv_wr = { | |
1624 | .wr_id = RDMA_WRID_RECV_CONTROL + idx, | |
1625 | .sg_list = &sge, | |
1626 | .num_sge = 1, | |
1627 | }; | |
1628 | ||
1629 | ||
1630 | if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) { | |
1631 | return -1; | |
1632 | } | |
1633 | ||
1634 | return 0; | |
1635 | } | |
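/*
 * Why the anticipation matters: with RC queue pairs, a SEND that
 * arrives while the receiver has no RECV work request posted does not
 * simply wait; it can draw a receiver-not-ready (RNR) NAK and, once
 * retries are exhausted, kill the connection. Hence the discipline
 * below of re-posting a replacement RECV before answering anything.
 */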
1636 | ||
1637 | /* | |
1638 | * Block and wait for a RECV control channel message to arrive. | |
1639 | */ | |
1640 | static int qemu_rdma_exchange_get_response(RDMAContext *rdma, | |
1641 | RDMAControlHeader *head, int expecting, int idx) | |
1642 | { | |
1643 | uint32_t byte_len; |
1644 | int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx, | |
1645 | &byte_len); | |
1646 | |
1647 | if (ret < 0) { | |
1648 | fprintf(stderr, "rdma migration: recv polling control error!\n"); | |
1649 | return ret; | |
1650 | } | |
1651 | ||
1652 | network_to_control((void *) rdma->wr_data[idx].control); | |
1653 | memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader)); | |
1654 | ||
1655 | DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]); | |
1656 | ||
1657 | if (expecting == RDMA_CONTROL_NONE) { | |
1658 | DDDPRINTF("Surprise: got %s (%d)\n", | |
1659 | control_desc[head->type], head->type); | |
1660 | } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) { | |
1661 | fprintf(stderr, "Was expecting a %s (%d) control message" | |
1662 | ", but got: %s (%d), length: %d\n", | |
1663 | control_desc[expecting], expecting, | |
1664 | control_desc[head->type], head->type, head->len); | |
1665 | return -EIO; | |
1666 | } | |
1667 | if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) { |
1668 | fprintf(stderr, "too long length: %d\n", head->len); | |
1669 | return -EINVAL; | |
1670 | } | |
1671 | if (sizeof(*head) + head->len != byte_len) { |
1672 | fprintf(stderr, "Malformed length: %d byte_len %d\n", | |
1673 | head->len, byte_len); | |
1674 | return -EINVAL; | |
1675 | } | |
1676 | |
1677 | return 0; | |
1678 | } | |
1679 | ||
1680 | /* | |
1681 | * When a RECV work request has completed, the work request's | |
1682 | * buffer is pointed at the header. | |
1683 | * | |
1684 | * This advances the pointer past the header to the data portion | |
1685 | * of the control message in the work request's buffer, which was | |
1686 | * populated after the work request finished. | |
1687 | */ | |
1688 | static void qemu_rdma_move_header(RDMAContext *rdma, int idx, | |
1689 | RDMAControlHeader *head) | |
1690 | { | |
1691 | rdma->wr_data[idx].control_len = head->len; | |
1692 | rdma->wr_data[idx].control_curr = | |
1693 | rdma->wr_data[idx].control + sizeof(RDMAControlHeader); | |
1694 | } | |
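/*
 * Buffer layout assumed by the helpers above (one registered buffer
 * per work request slot, header and payload packed back to back):
 *
 *   wr_data[idx].control
 *   +--------------------+------------------------------+
 *   | RDMAControlHeader  |  payload (head->len bytes)   |
 *   +--------------------+------------------------------+
 *                        ^
 *                        control_curr after qemu_rdma_move_header()
 */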
1695 | ||
1696 | /* | |
1697 | * This is an 'atomic' high-level operation to deliver a single, unified | |
1698 | * control-channel message. | |
1699 | * | |
1700 | * Additionally, if the user is expecting some kind of reply to this message, | |
1701 | * they can request a 'resp' response message be filled in by posting an | |
1702 | * additional work request on behalf of the user and waiting for an additional | |
1703 | * completion. | |
1704 | * | |
1705 | * The extra (optional) response saves us, during registration, from having | |
1706 | * to perform an *additional* exchange of messages just to provide a response, | |
1707 | * by piggy-backing it on the acknowledgement instead. | |
1708 | */ | |
1709 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, | |
1710 | uint8_t *data, RDMAControlHeader *resp, | |
1711 | int *resp_idx, | |
1712 | int (*callback)(RDMAContext *rdma)) | |
1713 | { | |
1714 | int ret = 0; | |
1715 | ||
1716 | /* | |
1717 | * Wait until the dest is ready before attempting to deliver the message | |
1718 | * by waiting for a READY message. | |
1719 | */ | |
1720 | if (rdma->control_ready_expected) { | |
1721 | RDMAControlHeader resp; | |
1722 | ret = qemu_rdma_exchange_get_response(rdma, | |
1723 | &resp, RDMA_CONTROL_READY, RDMA_WRID_READY); | |
1724 | if (ret < 0) { | |
1725 | return ret; | |
1726 | } | |
1727 | } | |
1728 | ||
1729 | /* | |
1730 | * If the user is expecting a response, post a WR in anticipation of it. | |
1731 | */ | |
1732 | if (resp) { | |
1733 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA); | |
1734 | if (ret) { | |
1735 | fprintf(stderr, "rdma migration: error posting" | |
1736 | " extra control recv for anticipated result!"); | |
1737 | return ret; | |
1738 | } | |
1739 | } | |
1740 | ||
1741 | /* | |
1742 | * Post a WR to replace the one we just consumed for the READY message. | |
1743 | */ | |
1744 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); | |
1745 | if (ret) { | |
1746 | fprintf(stderr, "rdma migration: error posting first control recv!"); | |
1747 | return ret; | |
1748 | } | |
1749 | ||
1750 | /* | |
1751 | * Deliver the control message that was requested. | |
1752 | */ | |
1753 | ret = qemu_rdma_post_send_control(rdma, data, head); | |
1754 | ||
1755 | if (ret < 0) { | |
1756 | fprintf(stderr, "Failed to send control buffer!\n"); | |
1757 | return ret; | |
1758 | } | |
1759 | ||
1760 | /* | |
1761 | * If we're expecting a response, block and wait for it. | |
1762 | */ | |
1763 | if (resp) { | |
1764 | if (callback) { | |
1765 | DDPRINTF("Issuing callback before receiving response...\n"); | |
1766 | ret = callback(rdma); | |
1767 | if (ret < 0) { | |
1768 | return ret; | |
1769 | } | |
1770 | } | |
1771 | ||
1772 | DDPRINTF("Waiting for response %s\n", control_desc[resp->type]); | |
1773 | ret = qemu_rdma_exchange_get_response(rdma, resp, | |
1774 | resp->type, RDMA_WRID_DATA); | |
1775 | ||
1776 | if (ret < 0) { | |
1777 | return ret; | |
1778 | } | |
1779 | ||
1780 | qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp); | |
1781 | if (resp_idx) { | |
1782 | *resp_idx = RDMA_WRID_DATA; | |
1783 | } | |
1784 | DDPRINTF("Response %s received.\n", control_desc[resp->type]); | |
1785 | } | |
1786 | ||
1787 | rdma->control_ready_expected = 1; | |
1788 | ||
1789 | return 0; | |
1790 | } | |
1791 | ||
1792 | /* | |
1793 | * This is an 'atomic' high-level operation to receive a single, unified | |
1794 | * control-channel message. | |
1795 | */ | |
1796 | static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head, | |
1797 | int expecting) | |
1798 | { | |
1799 | RDMAControlHeader ready = { | |
1800 | .len = 0, | |
1801 | .type = RDMA_CONTROL_READY, | |
1802 | .repeat = 1, | |
1803 | }; | |
1804 | int ret; | |
1805 | ||
1806 | /* | |
1807 | * Inform the source that we're ready to receive a message. | |
1808 | */ | |
1809 | ret = qemu_rdma_post_send_control(rdma, NULL, &ready); | |
1810 | ||
1811 | if (ret < 0) { | |
1812 | fprintf(stderr, "Failed to send control buffer!\n"); | |
1813 | return ret; | |
1814 | } | |
1815 | ||
1816 | /* | |
1817 | * Block and wait for the message. | |
1818 | */ | |
1819 | ret = qemu_rdma_exchange_get_response(rdma, head, | |
1820 | expecting, RDMA_WRID_READY); | |
1821 | ||
1822 | if (ret < 0) { | |
1823 | return ret; | |
1824 | } | |
1825 | ||
1826 | qemu_rdma_move_header(rdma, RDMA_WRID_READY, head); | |
1827 | ||
1828 | /* | |
1829 | * Post a new RECV work request to replace the one we just consumed. | |
1830 | */ | |
1831 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); | |
1832 | if (ret) { | |
1833 | fprintf(stderr, "rdma migration: error posting second control recv!"); | |
1834 | return ret; | |
1835 | } | |
1836 | ||
1837 | return 0; | |
1838 | } | |
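/*
 * Taken together, qemu_rdma_exchange_send() and qemu_rdma_exchange_recv()
 * implement a one-credit flow control scheme on the control channel.
 * A sketch of one round trip (time flows downward):
 *
 *    source (exchange_send)              destination (exchange_recv)
 *    ----------------------              ---------------------------
 *                                 <----  SEND: READY ("credit")
 *    wait for READY completion
 *    re-post RECV for next READY
 *    SEND: request  ----------------->   wait for request completion
 *                                        re-post RECV for next request
 *                                 <----  SEND: next READY
 *    ...
 */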
1839 | ||
1840 | /* | |
1841 | * Write an actual chunk of memory using RDMA. | |
1842 | * | |
1843 | * If we're using dynamic registration on the dest-side, we have to | |
1844 | * send a registration command first. | |
1845 | */ | |
1846 | static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma, | |
1847 | int current_index, uint64_t current_addr, | |
1848 | uint64_t length) | |
1849 | { | |
1850 | struct ibv_sge sge; | |
1851 | struct ibv_send_wr send_wr = { 0 }; | |
1852 | struct ibv_send_wr *bad_wr; | |
1853 | int reg_result_idx, ret, count = 0; | |
1854 | uint64_t chunk, chunks; | |
1855 | uint8_t *chunk_start, *chunk_end; | |
1856 | RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); | |
1857 | RDMARegister reg; | |
1858 | RDMARegisterResult *reg_result; | |
1859 | RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT }; | |
1860 | RDMAControlHeader head = { .len = sizeof(RDMARegister), | |
1861 | .type = RDMA_CONTROL_REGISTER_REQUEST, | |
1862 | .repeat = 1, | |
1863 | }; | |
1864 | ||
1865 | retry: | |
1866 | sge.addr = (uint64_t)(block->local_host_addr + | |
1867 | (current_addr - block->offset)); | |
1868 | sge.length = length; | |
1869 | ||
1870 | chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr); | |
1871 | chunk_start = ram_chunk_start(block, chunk); | |
1872 | ||
1873 | if (block->is_ram_block) { | |
1874 | chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); | |
1875 | ||
1876 | if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { | |
1877 | chunks--; | |
1878 | } | |
1879 | } else { | |
1880 | chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); | |
1881 | ||
1882 | if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { | |
1883 | chunks--; | |
1884 | } | |
1885 | } | |
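/*
 * Worked example with the 1 MB chunks used here: length == 2 MB yields
 * chunks = 2, and the aligned case above drops it to 1, so the write
 * spans chunk .. chunk + 1 (exactly two whole chunks); length == 2 MB
 * plus one byte keeps chunks = 2 and spans three.
 */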
1886 | ||
1887 | DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n", | |
1888 | chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); | |
1889 | ||
1890 | chunk_end = ram_chunk_end(block, chunk + chunks); | |
1891 | ||
1892 | if (!rdma->pin_all) { | |
1893 | #ifdef RDMA_UNREGISTRATION_EXAMPLE | |
1894 | qemu_rdma_unregister_waiting(rdma); | |
1895 | #endif | |
1896 | } | |
1897 | ||
1898 | while (test_bit(chunk, block->transit_bitmap)) { | |
1899 | (void)count; | |
1900 | DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64 | |
1901 | " current %" PRIu64 " len %" PRIu64 " %d %d\n", | |
1902 | count++, current_index, chunk, | |
1903 | sge.addr, length, rdma->nb_sent, block->nb_chunks); | |
1904 | ||
88571882 | 1905 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
1906 | |
1907 | if (ret < 0) { | |
1908 | fprintf(stderr, "Failed to Wait for previous write to complete " | |
1909 | "block %d chunk %" PRIu64 | |
1910 | " current %" PRIu64 " len %" PRIu64 " %d\n", | |
1911 | current_index, chunk, sge.addr, length, rdma->nb_sent); | |
1912 | return ret; | |
1913 | } | |
1914 | } | |
1915 | ||
1916 | if (!rdma->pin_all || !block->is_ram_block) { | |
1917 | if (!block->remote_keys[chunk]) { | |
1918 | /* | |
1919 | * This chunk has not yet been registered, so first check to see | |
1920 | * if the entire chunk is zero. If so, tell the other side to | |
1921 | * memset() + madvise() the entire chunk without RDMA. | |
1922 | */ | |
1923 | ||
1924 | if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length) | |
1925 | && buffer_find_nonzero_offset((void *)sge.addr, | |
1926 | length) == length) { | |
1927 | RDMACompress comp = { | |
1928 | .offset = current_addr, | |
1929 | .value = 0, | |
1930 | .block_idx = current_index, | |
1931 | .length = length, | |
1932 | }; | |
1933 | ||
1934 | head.len = sizeof(comp); | |
1935 | head.type = RDMA_CONTROL_COMPRESS; | |
1936 | ||
1937 | DDPRINTF("Entire chunk is zero, sending compress: %" | |
1938 | PRIu64 " for %d " | |
1939 | "bytes, index: %d, offset: %" PRId64 "...\n", | |
1940 | chunk, sge.length, current_index, current_addr); | |
1941 | ||
1942 | compress_to_network(&comp); | |
1943 | ret = qemu_rdma_exchange_send(rdma, &head, | |
1944 | (uint8_t *) &comp, NULL, NULL, NULL); | |
1945 | ||
1946 | if (ret < 0) { | |
1947 | return -EIO; | |
1948 | } | |
1949 | ||
1950 | acct_update_position(f, sge.length, true); | |
1951 | ||
1952 | return 1; | |
1953 | } | |
1954 | ||
1955 | /* | |
1956 | * Otherwise, tell other side to register. | |
1957 | */ | |
1958 | reg.current_index = current_index; | |
1959 | if (block->is_ram_block) { | |
1960 | reg.key.current_addr = current_addr; | |
1961 | } else { | |
1962 | reg.key.chunk = chunk; | |
1963 | } | |
1964 | reg.chunks = chunks; | |
1965 | ||
1966 | DDPRINTF("Sending registration request chunk %" PRIu64 " for %d " | |
1967 | "bytes, index: %d, offset: %" PRId64 "...\n", | |
1968 | chunk, sge.length, current_index, current_addr); | |
1969 | ||
1970 | register_to_network(®); | |
1971 | ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, | |
1972 | &resp, ®_result_idx, NULL); | |
1973 | if (ret < 0) { | |
1974 | return ret; | |
1975 | } | |
1976 | ||
1977 | /* try to overlap this single registration with the one we sent. */ | |
1978 | if (qemu_rdma_register_and_get_keys(rdma, block, | |
1979 | (uint8_t *) sge.addr, | |
1980 | &sge.lkey, NULL, chunk, | |
1981 | chunk_start, chunk_end)) { | |
1982 | fprintf(stderr, "cannot get lkey!\n"); | |
1983 | return -EINVAL; | |
1984 | } | |
1985 | ||
1986 | reg_result = (RDMARegisterResult *) | |
1987 | rdma->wr_data[reg_result_idx].control_curr; | |
1988 | ||
1989 | network_to_result(reg_result); | |
1990 | ||
1991 | DDPRINTF("Received registration result:" | |
1992 | " my key: %x their key %x, chunk %" PRIu64 "\n", | |
1993 | block->remote_keys[chunk], reg_result->rkey, chunk); | |
1994 | ||
1995 | block->remote_keys[chunk] = reg_result->rkey; | |
1996 | block->remote_host_addr = reg_result->host_addr; | |
1997 | } else { | |
1998 | /* already registered before */ | |
1999 | if (qemu_rdma_register_and_get_keys(rdma, block, | |
2000 | (uint8_t *)sge.addr, | |
2001 | &sge.lkey, NULL, chunk, | |
2002 | chunk_start, chunk_end)) { | |
2003 | fprintf(stderr, "cannot get lkey!\n"); | |
2004 | return -EINVAL; | |
2005 | } | |
2006 | } | |
2007 | ||
2008 | send_wr.wr.rdma.rkey = block->remote_keys[chunk]; | |
2009 | } else { | |
2010 | send_wr.wr.rdma.rkey = block->remote_rkey; | |
2011 | ||
2012 | if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr, | |
2013 | &sge.lkey, NULL, chunk, | |
2014 | chunk_start, chunk_end)) { | |
2015 | fprintf(stderr, "cannot get lkey!\n"); | |
2016 | return -EINVAL; | |
2017 | } | |
2018 | } | |
2019 | ||
2020 | /* | |
2021 | * Encode the ram block index and chunk within this wrid. | |
2022 | * We will use this information at the time of completion | |
2023 | * to figure out which bitmap to check against and then which | |
2024 | * chunk in the bitmap to look for. | |
2025 | */ | |
2026 | send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, | |
2027 | current_index, chunk); | |
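/* E.g. block index 3 and chunk 17 travel inside this one 64-bit wr_id
 * together with the WRITE type, and are recovered when the completion
 * is polled (see the decoding in qemu_rdma_poll()). */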
2028 | ||
2029 | send_wr.opcode = IBV_WR_RDMA_WRITE; | |
2030 | send_wr.send_flags = IBV_SEND_SIGNALED; | |
2031 | send_wr.sg_list = &sge; | |
2032 | send_wr.num_sge = 1; | |
2033 | send_wr.wr.rdma.remote_addr = block->remote_host_addr + | |
2034 | (current_addr - block->offset); | |
2035 | ||
2036 | DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx" | |
2037 | " remote: %lx, bytes %" PRIu32 "\n", | |
2038 | chunk, sge.addr, send_wr.wr.rdma.remote_addr, | |
2039 | sge.length); | |
2040 | ||
2041 | /* | |
2042 | * ibv_post_send() does not return negative error numbers; | |
2043 | * per the specification they are positive - no idea why. | |
2044 | */ | |
2045 | ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); | |
2046 | ||
2047 | if (ret == ENOMEM) { | |
2048 | DDPRINTF("send queue is full. wait a little....\n"); | |
88571882 | 2049 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
2050 | if (ret < 0) { |
2051 | fprintf(stderr, "rdma migration: failed to make " | |
2052 | "room in full send queue! %d\n", ret); | |
2053 | return ret; | |
2054 | } | |
2055 | ||
2056 | goto retry; | |
2057 | ||
2058 | } else if (ret > 0) { | |
2059 | perror("rdma migration: post rdma write failed"); | |
2060 | return -ret; | |
2061 | } | |
2062 | ||
2063 | set_bit(chunk, block->transit_bitmap); | |
2064 | acct_update_position(f, sge.length, false); | |
2065 | rdma->total_writes++; | |
2066 | ||
2067 | return 0; | |
2068 | } | |
2069 | ||
2070 | /* | |
2071 | * Push out any unwritten RDMA operations. | |
2072 | * | |
2073 | * We support sending out multiple chunks at the same time. | |
2074 | * Not all of them need to get signaled in the completion queue. | |
2075 | */ | |
2076 | static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma) | |
2077 | { | |
2078 | int ret; | |
2079 | ||
2080 | if (!rdma->current_length) { | |
2081 | return 0; | |
2082 | } | |
2083 | ||
2084 | ret = qemu_rdma_write_one(f, rdma, | |
2085 | rdma->current_index, rdma->current_addr, rdma->current_length); | |
2086 | ||
2087 | if (ret < 0) { | |
2088 | return ret; | |
2089 | } | |
2090 | ||
2091 | if (ret == 0) { | |
2092 | rdma->nb_sent++; | |
2093 | DDDPRINTF("sent total: %d\n", rdma->nb_sent); | |
2094 | } | |
2095 | ||
2096 | rdma->current_length = 0; | |
2097 | rdma->current_addr = 0; | |
2098 | ||
2099 | return 0; | |
2100 | } | |
2101 | ||
2102 | static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma, | |
2103 | uint64_t offset, uint64_t len) | |
2104 | { | |
2105 | RDMALocalBlock *block; |
2106 | uint8_t *host_addr; | |
2107 | uint8_t *chunk_end; | |
2108 | ||
2109 | if (rdma->current_index < 0) { | |
2110 | return 0; | |
2111 | } | |
2112 | ||
2113 | if (rdma->current_chunk < 0) { | |
2114 | return 0; | |
2115 | } | |
2116 | ||
2117 | block = &(rdma->local_ram_blocks.block[rdma->current_index]); | |
2118 | host_addr = block->local_host_addr + (offset - block->offset); | |
2119 | chunk_end = ram_chunk_end(block, rdma->current_chunk); | |
2120 | |
2121 | if (rdma->current_length == 0) { | |
2122 | return 0; | |
2123 | } | |
2124 | ||
2125 | /* | |
2126 | * Only merge into chunk sequentially. | |
2127 | */ | |
2128 | if (offset != (rdma->current_addr + rdma->current_length)) { | |
2129 | return 0; | |
2130 | } | |
2131 | ||
2132 | if (offset < block->offset) { |
2133 | return 0; | |
2134 | } | |
2135 | ||
2136 | if ((offset + len) > (block->offset + block->length)) { | |
2137 | return 0; | |
2138 | } | |
2139 | ||
2140 | if ((host_addr + len) > chunk_end) { |
2141 | return 0; | |
2142 | } | |
2143 | ||
2144 | return 1; | |
2145 | } | |
2146 | ||
2147 | /* | |
2148 | * We're not actually writing here, but doing three things: | |
2149 | * | |
2150 | * 1. Identify the chunk the buffer belongs to. | |
2151 | * 2. If the chunk is full or the buffer doesn't belong to the current | |
2152 | * chunk, then start a new chunk and flush() the old chunk. | |
2153 | * 3. To keep the hardware busy, we also group chunks into batches | |
2154 | * and only require that a batch gets acknowledged in the completion | |
2155 | * queue instead of each individual chunk. | |
2156 | */ | |
2157 | static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma, | |
2158 | uint64_t block_offset, uint64_t offset, | |
2159 | uint64_t len) | |
2160 | { | |
2161 | uint64_t current_addr = block_offset + offset; | |
2162 | uint64_t index = rdma->current_index; | |
2163 | uint64_t chunk = rdma->current_chunk; | |
2164 | int ret; | |
2165 | ||
2166 | /* If we cannot merge it, we flush the current buffer first. */ | |
2167 | if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) { | |
2168 | ret = qemu_rdma_write_flush(f, rdma); | |
2169 | if (ret) { | |
2170 | return ret; | |
2171 | } | |
2172 | rdma->current_length = 0; | |
2173 | rdma->current_addr = current_addr; | |
2174 | ||
2175 | ret = qemu_rdma_search_ram_block(rdma, block_offset, | |
2176 | offset, len, &index, &chunk); | |
2177 | if (ret) { | |
2178 | fprintf(stderr, "ram block search failed\n"); | |
2179 | return ret; | |
2180 | } | |
2181 | rdma->current_index = index; | |
2182 | rdma->current_chunk = chunk; | |
2183 | } | |
2184 | ||
2185 | /* merge it */ | |
2186 | rdma->current_length += len; | |
2187 | ||
2188 | /* flush it if buffer is too large */ | |
2189 | if (rdma->current_length >= RDMA_MERGE_MAX) { | |
2190 | return qemu_rdma_write_flush(f, rdma); | |
2191 | } | |
2192 | ||
2193 | return 0; | |
2194 | } | |
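/*
 * Example of the merging behaviour: three sequential calls for 4 KB
 * pages at offsets 0x0000, 0x1000 and 0x2000 of one block accumulate
 * into a single 12 KB current buffer; a later call at 0x5000 fails the
 * sequential-merge test, flushes that buffer as one RDMA write, and
 * starts a new buffer at 0x5000.
 */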
2195 | ||
2196 | static void qemu_rdma_cleanup(RDMAContext *rdma) | |
2197 | { | |
2198 | struct rdma_cm_event *cm_event; | |
2199 | int ret, idx; | |
2200 | ||
5a91337c | 2201 | if (rdma->cm_id && rdma->connected) { |
2202 | if (rdma->error_state) { |
2203 | RDMAControlHeader head = { .len = 0, | |
2204 | .type = RDMA_CONTROL_ERROR, | |
2205 | .repeat = 1, | |
2206 | }; | |
2207 | fprintf(stderr, "Early error. Sending error.\n"); | |
2208 | qemu_rdma_post_send_control(rdma, NULL, &head); | |
2209 | } | |
2210 | ||
2211 | ret = rdma_disconnect(rdma->cm_id); | |
2212 | if (!ret) { | |
2213 | DDPRINTF("waiting for disconnect\n"); | |
2214 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2215 | if (!ret) { | |
2216 | rdma_ack_cm_event(cm_event); | |
2217 | } | |
2218 | } | |
2219 | DDPRINTF("Disconnected.\n"); | |
5a91337c | 2220 | rdma->connected = false; |
2221 | } |
2222 | ||
2223 | g_free(rdma->block); | |
2224 | rdma->block = NULL; | |
2225 | ||
1f22364b | 2226 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2227 | if (rdma->wr_data[idx].control_mr) { |
2228 | rdma->total_registrations--; | |
2229 | ibv_dereg_mr(rdma->wr_data[idx].control_mr); | |
2230 | } | |
2231 | rdma->wr_data[idx].control_mr = NULL; | |
2232 | } | |
2233 | ||
2234 | if (rdma->local_ram_blocks.block) { | |
2235 | while (rdma->local_ram_blocks.nb_blocks) { | |
2236 | __qemu_rdma_delete_block(rdma, | |
2237 | rdma->local_ram_blocks.block->offset); | |
2238 | } | |
2239 | } | |
2240 | ||
2241 | if (rdma->qp) { | |
5a91337c | 2242 | rdma_destroy_qp(rdma->cm_id); |
2243 | rdma->qp = NULL; |
2244 | } | |
2245 | if (rdma->cq) { | |
2246 | ibv_destroy_cq(rdma->cq); | |
2247 | rdma->cq = NULL; | |
2248 | } | |
2249 | if (rdma->comp_channel) { | |
2250 | ibv_destroy_comp_channel(rdma->comp_channel); | |
2251 | rdma->comp_channel = NULL; | |
2252 | } | |
2253 | if (rdma->pd) { | |
2254 | ibv_dealloc_pd(rdma->pd); | |
2255 | rdma->pd = NULL; | |
2256 | } | |
2257 | if (rdma->listen_id) { | |
2258 | rdma_destroy_id(rdma->listen_id); | |
2259 | rdma->listen_id = NULL; | |
2260 | } | |
2261 | if (rdma->cm_id) { | |
2262 | rdma_destroy_id(rdma->cm_id); | |
2263 | rdma->cm_id = NULL; | |
2264 | } | |
2265 | if (rdma->channel) { | |
2266 | rdma_destroy_event_channel(rdma->channel); | |
2267 | rdma->channel = NULL; | |
2268 | } | |
2269 | g_free(rdma->host); |
2270 | rdma->host = NULL; | |
2271 | } |
2272 | ||
2273 | ||
2274 | static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all) | |
2275 | { | |
2276 | int ret, idx; | |
2277 | Error *local_err = NULL, **temp = &local_err; | |
2278 | ||
2279 | /* | |
2280 | * Will be validated against destination's actual capabilities | |
2281 | * after the connect() completes. | |
2282 | */ | |
2283 | rdma->pin_all = pin_all; | |
2284 | ||
2285 | ret = qemu_rdma_resolve_host(rdma, temp); | |
2286 | if (ret) { | |
2287 | goto err_rdma_source_init; | |
2288 | } | |
2289 | ||
2290 | ret = qemu_rdma_alloc_pd_cq(rdma); | |
2291 | if (ret) { | |
2292 | ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()" | |
2293 | " limits may be too low. Please check $ ulimit -a # and " | |
66988941 | 2294 | "search for 'ulimit -l' in the output"); |
2295 | goto err_rdma_source_init; |
2296 | } | |
2297 | ||
2298 | ret = qemu_rdma_alloc_qp(rdma); | |
2299 | if (ret) { | |
66988941 | 2300 | ERROR(temp, "rdma migration: error allocating qp!"); |
2301 | goto err_rdma_source_init; |
2302 | } | |
2303 | ||
2304 | ret = qemu_rdma_init_ram_blocks(rdma); | |
2305 | if (ret) { | |
66988941 | 2306 | ERROR(temp, "rdma migration: error initializing ram blocks!"); |
2307 | goto err_rdma_source_init; |
2308 | } | |
2309 | ||
1f22364b | 2310 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2311 | ret = qemu_rdma_reg_control(rdma, idx); |
2312 | if (ret) { | |
66988941 | 2313 | ERROR(temp, "rdma migration: error registering %d control!", |
2314 | idx); |
2315 | goto err_rdma_source_init; | |
2316 | } | |
2317 | } | |
2318 | ||
2319 | return 0; | |
2320 | ||
2321 | err_rdma_source_init: | |
2322 | error_propagate(errp, local_err); | |
2323 | qemu_rdma_cleanup(rdma); | |
2324 | return -1; | |
2325 | } | |
2326 | ||
2327 | static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) | |
2328 | { | |
2329 | RDMACapabilities cap = { | |
2330 | .version = RDMA_CONTROL_VERSION_CURRENT, | |
2331 | .flags = 0, | |
2332 | }; | |
2333 | struct rdma_conn_param conn_param = { .initiator_depth = 2, | |
2334 | .retry_count = 5, | |
2335 | .private_data = &cap, | |
2336 | .private_data_len = sizeof(cap), | |
2337 | }; | |
2338 | struct rdma_cm_event *cm_event; | |
2339 | int ret; | |
2340 | ||
2341 | /* | |
2342 | * Only negotiate the capability with destination if the user | |
2343 | * on the source first requested the capability. | |
2344 | */ | |
2345 | if (rdma->pin_all) { | |
2346 | DPRINTF("Server pin-all memory requested.\n"); | |
2347 | cap.flags |= RDMA_CAPABILITY_PIN_ALL; | |
2348 | } | |
2349 | ||
2350 | caps_to_network(&cap); | |
2351 | ||
2352 | ret = rdma_connect(rdma->cm_id, &conn_param); | |
2353 | if (ret) { | |
2354 | perror("rdma_connect"); | |
66988941 | 2355 | ERROR(errp, "connecting to destination!"); |
2356 | rdma_destroy_id(rdma->cm_id); |
2357 | rdma->cm_id = NULL; | |
2358 | goto err_rdma_source_connect; | |
2359 | } | |
2360 | ||
2361 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2362 | if (ret) { | |
2363 | perror("rdma_get_cm_event after rdma_connect"); | |
66988941 | 2364 | ERROR(errp, "connecting to destination!"); |
2365 | rdma_ack_cm_event(cm_event); |
2366 | rdma_destroy_id(rdma->cm_id); | |
2367 | rdma->cm_id = NULL; | |
2368 | goto err_rdma_source_connect; | |
2369 | } | |
2370 | ||
2371 | if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { | |
2372 | perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect"); | |
66988941 | 2373 | ERROR(errp, "connecting to destination!"); |
2374 | rdma_ack_cm_event(cm_event); |
2375 | rdma_destroy_id(rdma->cm_id); | |
2376 | rdma->cm_id = NULL; | |
2377 | goto err_rdma_source_connect; | |
2378 | } | |
5a91337c | 2379 | rdma->connected = true; |
2380 | |
2381 | memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); | |
2382 | network_to_caps(&cap); | |
2383 | ||
2384 | /* | |
2385 | * Verify that the *requested* capabilities are supported by the destination | |
2386 | * and disable them otherwise. | |
2387 | */ | |
2388 | if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) { | |
2389 | ERROR(errp, "Server cannot support pinning all memory. " | |
66988941 | 2390 | "Will register memory dynamically."); |
2391 | rdma->pin_all = false; |
2392 | } | |
2393 | ||
2394 | DPRINTF("Pin all memory: %s\n", rdma->pin_all ? "enabled" : "disabled"); | |
2395 | ||
2396 | rdma_ack_cm_event(cm_event); | |
2397 | ||
87772639 | 2398 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); |
2da776db | 2399 | if (ret) { |
66988941 | 2400 | ERROR(errp, "posting second control recv!"); |
2401 | goto err_rdma_source_connect; |
2402 | } | |
2403 | ||
2404 | rdma->control_ready_expected = 1; | |
2405 | rdma->nb_sent = 0; | |
2406 | return 0; | |
2407 | ||
2408 | err_rdma_source_connect: | |
2409 | qemu_rdma_cleanup(rdma); | |
2410 | return -1; | |
2411 | } | |
2412 | ||
2413 | static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) | |
2414 | { | |
2415 | int ret = -EINVAL, idx; | |
2416 | struct rdma_cm_id *listen_id; |
2417 | char ip[40] = "unknown"; | |
7fc5b13f | 2418 | struct rdma_addrinfo *res; |
b58c8552 | 2419 | char port_str[16]; |
2da776db | 2420 | |
1f22364b | 2421 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2422 | rdma->wr_data[idx].control_len = 0; |
2423 | rdma->wr_data[idx].control_curr = NULL; | |
2424 | } | |
2425 | ||
2426 | if (rdma->host == NULL) { | |
66988941 | 2427 | ERROR(errp, "RDMA host is not set!"); |
2428 | rdma->error_state = -EINVAL; |
2429 | return -1; | |
2430 | } | |
2431 | /* create CM channel */ | |
2432 | rdma->channel = rdma_create_event_channel(); | |
2433 | if (!rdma->channel) { | |
66988941 | 2434 | ERROR(errp, "could not create rdma event channel"); |
2435 | rdma->error_state = -EINVAL; |
2436 | return -1; | |
2437 | } | |
2438 | ||
2439 | /* create CM id */ | |
2440 | ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP); | |
2441 | if (ret) { | |
66988941 | 2442 | ERROR(errp, "could not create cm_id!"); |
2443 | goto err_dest_init_create_listen_id; |
2444 | } | |
2445 | ||
2446 | snprintf(port_str, 16, "%d", rdma->port); |
2447 | port_str[15] = '\0'; | |
2448 | |
2449 | if (rdma->host && strcmp("", rdma->host)) { | |
7fc5b13f | 2450 | struct rdma_addrinfo *e; |
6470215b | 2451 | |
7fc5b13f | 2452 | ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res); |
b58c8552 | 2453 | if (ret < 0) { |
7fc5b13f | 2454 | ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host); |
2455 | goto err_dest_init_bind_addr; |
2456 | } | |
b58c8552 | 2457 | |
2458 | for (e = res; e != NULL; e = e->ai_next) { |
2459 | inet_ntop(e->ai_family, | |
7fc5b13f | 2460 | &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); |
6470215b | 2461 | DPRINTF("Trying %s => %s\n", rdma->host, ip); |
7fc5b13f | 2462 | ret = rdma_bind_addr(listen_id, e->ai_dst_addr); |
6470215b | 2463 | if (!ret) { |
2464 | if (e->ai_family == AF_INET6) { |
2465 | ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs); | |
2466 | if (ret) { | |
2467 | continue; | |
2468 | } | |
2469 | } | |
2470 | ||
2471 | goto listen; |
2472 | } | |
2473 | } | |
b58c8552 | 2474 | |
2475 | ERROR(errp, "Error: could not rdma_bind_addr!"); |
2476 | goto err_dest_init_bind_addr; | |
2da776db | 2477 | } else { |
66988941 | 2478 | ERROR(errp, "migration host and port not specified!"); |
2479 | ret = -EINVAL; |
2480 | goto err_dest_init_bind_addr; | |
2da776db | 2481 | } |
6470215b | 2482 | listen: |
2483 | |
2484 | rdma->listen_id = listen_id; | |
2485 | qemu_rdma_dump_gid("dest_init", listen_id); | |
2486 | return 0; | |
2487 | ||
2488 | err_dest_init_bind_addr: | |
2489 | rdma_destroy_id(listen_id); | |
2490 | err_dest_init_create_listen_id: | |
2491 | rdma_destroy_event_channel(rdma->channel); | |
2492 | rdma->channel = NULL; | |
2493 | rdma->error_state = ret; | |
2494 | return ret; | |
2495 | ||
2496 | } | |
2497 | ||
2498 | static void *qemu_rdma_data_init(const char *host_port, Error **errp) | |
2499 | { | |
2500 | RDMAContext *rdma = NULL; | |
2501 | InetSocketAddress *addr; | |
2502 | ||
2503 | if (host_port) { | |
2504 | rdma = g_malloc0(sizeof(RDMAContext)); /* g_malloc0() already zeroes it */ | |
2506 | rdma->current_index = -1; | |
2507 | rdma->current_chunk = -1; | |
2508 | ||
2509 | addr = inet_parse(host_port, NULL); | |
2510 | if (addr != NULL) { | |
2511 | rdma->port = atoi(addr->port); | |
2512 | rdma->host = g_strdup(addr->host); | |
2513 | } else { | |
2514 | ERROR(errp, "bad RDMA migration address '%s'", host_port); | |
2515 | g_free(rdma); | |
2516 | return NULL; | |
2517 | } | |
2518 | } | |
2519 | ||
2520 | return rdma; | |
2521 | } | |
2522 | ||
2523 | /* | |
2524 | * QEMUFile interface to the control channel. | |
2525 | * SEND messages for control only. | |
2526 | * pc.ram is handled with regular RDMA messages. | |
2527 | */ | |
2528 | static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf, | |
2529 | int64_t pos, int size) | |
2530 | { | |
2531 | QEMUFileRDMA *r = opaque; | |
2532 | QEMUFile *f = r->file; | |
2533 | RDMAContext *rdma = r->rdma; | |
2534 | size_t remaining = size; | |
2535 | uint8_t * data = (void *) buf; | |
2536 | int ret; | |
2537 | ||
2538 | CHECK_ERROR_STATE(); | |
2539 | ||
2540 | /* | |
2541 | * Push out any writes that | |
2542 | * we're queued up for pc.ram. | |
2543 | */ | |
2544 | ret = qemu_rdma_write_flush(f, rdma); | |
2545 | if (ret < 0) { | |
2546 | rdma->error_state = ret; | |
2547 | return ret; | |
2548 | } | |
2549 | ||
2550 | while (remaining) { | |
2551 | RDMAControlHeader head; | |
2552 | ||
2553 | r->len = MIN(remaining, RDMA_SEND_INCREMENT); | |
2554 | remaining -= r->len; | |
2555 | ||
2556 | head.len = r->len; | |
2557 | head.type = RDMA_CONTROL_QEMU_FILE; | |
2558 | ||
2559 | ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL); | |
2560 | ||
2561 | if (ret < 0) { | |
2562 | rdma->error_state = ret; | |
2563 | return ret; | |
2564 | } | |
2565 | ||
2566 | data += r->len; | |
2567 | } | |
2568 | ||
2569 | return size; | |
2570 | } | |
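/*
 * Example: a 100 KB piece of device state leaves this function as four
 * RDMA_CONTROL_QEMU_FILE messages of 32 KB, 32 KB, 32 KB and 4 KB,
 * since RDMA_SEND_INCREMENT caps each control SEND at 32768 bytes.
 */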
2571 | ||
2572 | static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf, | |
2573 | int size, int idx) | |
2574 | { | |
2575 | size_t len = 0; | |
2576 | ||
2577 | if (rdma->wr_data[idx].control_len) { | |
2578 | DDDPRINTF("RDMA %" PRId64 " of %d bytes already in buffer\n", | |
2579 | rdma->wr_data[idx].control_len, size); | |
2580 | ||
2581 | len = MIN(size, rdma->wr_data[idx].control_len); | |
2582 | memcpy(buf, rdma->wr_data[idx].control_curr, len); | |
2583 | rdma->wr_data[idx].control_curr += len; | |
2584 | rdma->wr_data[idx].control_len -= len; | |
2585 | } | |
2586 | ||
2587 | return len; | |
2588 | } | |
2589 | ||
2590 | /* | |
2591 | * QEMUFile interface to the control channel. | |
2592 | * RDMA links don't use bytestreams, so we have to | |
2593 | * return bytes to QEMUFile opportunistically. | |
2594 | */ | |
2595 | static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf, | |
2596 | int64_t pos, int size) | |
2597 | { | |
2598 | QEMUFileRDMA *r = opaque; | |
2599 | RDMAContext *rdma = r->rdma; | |
2600 | RDMAControlHeader head; | |
2601 | int ret = 0; | |
2602 | ||
2603 | CHECK_ERROR_STATE(); | |
2604 | ||
2605 | /* | |
2606 | * First, we hold on to the last SEND message we | |
2607 | * were given and dish out the bytes until we run | |
2608 | * out of bytes. | |
2609 | */ | |
2610 | r->len = qemu_rdma_fill(r->rdma, buf, size, 0); | |
2611 | if (r->len) { | |
2612 | return r->len; | |
2613 | } | |
2614 | ||
2615 | /* | |
2616 | * Once we run out, we block and wait for another | |
2617 | * SEND message to arrive. | |
2618 | */ | |
2619 | ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE); | |
2620 | ||
2621 | if (ret < 0) { | |
2622 | rdma->error_state = ret; | |
2623 | return ret; | |
2624 | } | |
2625 | ||
2626 | /* | |
2627 | * SEND was received with new bytes, now try again. | |
2628 | */ | |
2629 | return qemu_rdma_fill(r->rdma, buf, size, 0); | |
2630 | } | |
2631 | ||
2632 | /* | |
2633 | * Block until all the outstanding chunks have been delivered by the hardware. | |
2634 | */ | |
2635 | static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma) | |
2636 | { | |
2637 | int ret; | |
2638 | ||
2639 | if (qemu_rdma_write_flush(f, rdma) < 0) { | |
2640 | return -EIO; | |
2641 | } | |
2642 | ||
2643 | while (rdma->nb_sent) { | |
88571882 | 2644 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
2645 | if (ret < 0) { |
2646 | fprintf(stderr, "rdma migration: complete polling error!\n"); | |
2647 | return -EIO; | |
2648 | } | |
2649 | } | |
2650 | ||
2651 | qemu_rdma_unregister_waiting(rdma); | |
2652 | ||
2653 | return 0; | |
2654 | } | |
2655 | ||
2656 | static int qemu_rdma_close(void *opaque) | |
2657 | { | |
2658 | DPRINTF("Shutting down connection.\n"); | |
2659 | QEMUFileRDMA *r = opaque; | |
2660 | if (r->rdma) { | |
2661 | qemu_rdma_cleanup(r->rdma); | |
2662 | g_free(r->rdma); | |
2663 | } | |
2664 | g_free(r); | |
2665 | return 0; | |
2666 | } | |
2667 | ||
2668 | /* | |
2669 | * Parameters: | |
2670 | * @offset == 0 : | |
2671 | * This means that 'block_offset' is a full virtual address that does not | |
2672 | * belong to a RAMBlock of the virtual machine and instead | |
2673 | * represents a private malloc'd memory area that the caller wishes to | |
2674 | * transfer. | |
2675 | * | |
2676 | * @offset != 0 : | |
2677 | * Offset is an offset to be added to block_offset and used | |
2678 | * to also lookup the corresponding RAMBlock. | |
2679 | * | |
2680 | * @size > 0 : | |
2681 | * Initiate a transfer of this size. | |
2682 | * | |
2683 | * @size == 0 : | |
2684 | * A 'hint' or 'advice' that means that we wish to speculatively | |
2685 | * and asynchronously unregister this memory. In this case, there is no | |
52f35022 | 2686 | * guarantee that the unregister will actually happen, for example, |
2687 | * if the memory is being actively transmitted. Additionally, the memory |
2688 | * may be re-registered at any future time if a write within the same | |
2689 | * chunk was requested again, even if you attempted to unregister it | |
2690 | * here. | |
2691 | * | |
2692 | * @size < 0 : TODO, not yet supported | |
2693 | * Unregister the memory NOW. This means that the caller does not | |
2694 | * expect there to be any future RDMA transfers and we just want to clean | |
2695 | * things up. This is used in case the upper layer owns the memory and | |
2696 | * cannot wait for qemu_fclose() to occur. | |
2697 | * | |
2698 | * @bytes_sent : User-specified pointer to indicate how many bytes were | |
2699 | * sent. Usually, this will not be more than a few bytes of | |
2700 | * the protocol because most transfers are sent asynchronously. | |
2701 | */ | |
2702 | static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, | |
2703 | ram_addr_t block_offset, ram_addr_t offset, | |
2704 | size_t size, int *bytes_sent) | |
2705 | { | |
2706 | QEMUFileRDMA *rfile = opaque; | |
2707 | RDMAContext *rdma = rfile->rdma; | |
2708 | int ret; | |
2709 | ||
2710 | CHECK_ERROR_STATE(); | |
2711 | ||
2712 | qemu_fflush(f); | |
2713 | ||
2714 | if (size > 0) { | |
2715 | /* | |
2716 | * Add this page to the current 'chunk'. If the chunk | |
2717 | * is full, or the page doesn't belong to the current chunk, | |
2718 | * an actual RDMA write will occur and a new chunk will be formed. | |
2719 | */ | |
2720 | ret = qemu_rdma_write(f, rdma, block_offset, offset, size); | |
2721 | if (ret < 0) { | |
2722 | fprintf(stderr, "rdma migration: write error! %d\n", ret); | |
2723 | goto err; | |
2724 | } | |
2725 | ||
2726 | /* | |
2727 | * We always return 1 byte because the RDMA | |
2728 | * protocol is completely asynchronous. We do not yet know | |
2729 | * whether an identified chunk is zero or not because we're | |
2730 | * waiting for other pages to potentially be merged with | |
2731 | * the current chunk. So, we have to call qemu_update_position() | |
2732 | * later on when the actual write occurs. | |
2733 | */ | |
2734 | if (bytes_sent) { | |
2735 | *bytes_sent = 1; | |
2736 | } | |
2737 | } else { | |
2738 | uint64_t index, chunk; | |
2739 | ||
2740 | /* TODO: Change QEMUFileOps prototype to be signed: size_t => long | |
2741 | if (size < 0) { | |
2742 | ret = qemu_rdma_drain_cq(f, rdma); | |
2743 | if (ret < 0) { | |
2744 | fprintf(stderr, "rdma: failed to synchronously drain" | |
2745 | " completion queue before unregistration.\n"); | |
2746 | goto err; | |
2747 | } | |
2748 | } | |
2749 | */ | |
2750 | ||
2751 | ret = qemu_rdma_search_ram_block(rdma, block_offset, | |
2752 | offset, size, &index, &chunk); | |
2753 | ||
2754 | if (ret) { | |
2755 | fprintf(stderr, "ram block search failed\n"); | |
2756 | goto err; | |
2757 | } | |
2758 | ||
2759 | qemu_rdma_signal_unregister(rdma, index, chunk, 0); | |
2760 | ||
2761 | /* | |
52f35022 | 2762 | * TODO: Synchronous, guaranteed unregistration (should not occur during |
2763 | * fast-path). Otherwise, unregisters will process on the next call to |
2764 | * qemu_rdma_drain_cq() | |
2765 | if (size < 0) { | |
2766 | qemu_rdma_unregister_waiting(rdma); | |
2767 | } | |
2768 | */ | |
2769 | } | |
2770 | ||
2771 | /* | |
2772 | * Drain the Completion Queue if possible, but do not block, | |
2773 | * just poll. | |
2774 | * | |
2775 | * If nothing to poll, the end of the iteration will do this | |
2776 | * again to make sure we don't overflow the request queue. | |
2777 | */ | |
2778 | while (1) { | |
2779 | uint64_t wr_id, wr_id_in; | |
88571882 | 2780 | int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL); |
2781 | if (ret < 0) { |
2782 | fprintf(stderr, "rdma migration: polling error! %d\n", ret); | |
2783 | goto err; | |
2784 | } | |
2785 | ||
2786 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
2787 | ||
2788 | if (wr_id == RDMA_WRID_NONE) { | |
2789 | break; | |
2790 | } | |
2791 | } | |
2792 | ||
2793 | return RAM_SAVE_CONTROL_DELAYED; | |
2794 | err: | |
2795 | rdma->error_state = ret; | |
2796 | return ret; | |
2797 | } | |
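/*
 * A minimal sketch of how a caller would use the size == 0 hint above
 * (hypothetical call site; in practice this runs via the QEMUFile
 * save_page hook rather than being called directly):
 */
#if 0
/* Hint that block_offset/offset's chunk may be unregistered later.
 * Nothing is transmitted, so bytes_sent is left untouched. */
qemu_rdma_save_page(f, rfile, block_offset, offset, 0, NULL);
#endif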
2798 | ||
2799 | static int qemu_rdma_accept(RDMAContext *rdma) | |
2800 | { | |
2801 | RDMACapabilities cap; | |
2802 | struct rdma_conn_param conn_param = { | |
2803 | .responder_resources = 2, | |
2804 | .private_data = &cap, | |
2805 | .private_data_len = sizeof(cap), | |
2806 | }; | |
2807 | struct rdma_cm_event *cm_event; | |
2808 | struct ibv_context *verbs; | |
2809 | int ret = -EINVAL; | |
2810 | int idx; | |
2811 | ||
2812 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2813 | if (ret) { | |
2814 | goto err_rdma_dest_wait; | |
2815 | } | |
2816 | ||
2817 | if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) { | |
2818 | rdma_ack_cm_event(cm_event); | |
2819 | goto err_rdma_dest_wait; | |
2820 | } | |
2821 | ||
2822 | memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); | |
2823 | ||
2824 | network_to_caps(&cap); | |
2825 | ||
2826 | if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) { | |
2827 | fprintf(stderr, "Unknown source RDMA version: %d, bailing...\n", | |
2828 | cap.version); | |
2829 | rdma_ack_cm_event(cm_event); | |
2830 | goto err_rdma_dest_wait; | |
2831 | } | |
2832 | ||
2833 | /* | |
2834 | * Respond with only the capabilities this version of QEMU knows about. | |
2835 | */ | |
2836 | cap.flags &= known_capabilities; | |
2837 | ||
2838 | /* | |
2839 | * Enable the ones that we do know about. | |
2840 | * Add other checks here as new ones are introduced. | |
2841 | */ | |
2842 | if (cap.flags & RDMA_CAPABILITY_PIN_ALL) { | |
2843 | rdma->pin_all = true; | |
2844 | } | |
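/*
 * Example: a newer source requesting (RDMA_CAPABILITY_PIN_ALL | 0x02)
 * from this binary is answered with RDMA_CAPABILITY_PIN_ALL only, and
 * both sides proceed with the intersection of their capabilities.
 */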
2845 | ||
2846 | rdma->cm_id = cm_event->id; | |
2847 | verbs = cm_event->id->verbs; | |
2848 | ||
2849 | rdma_ack_cm_event(cm_event); | |
2850 | ||
2851 | DPRINTF("Memory pin all: %s\n", rdma->pin_all ? "enabled" : "disabled"); | |
2852 | ||
2853 | caps_to_network(&cap); | |
2854 | ||
2855 | DPRINTF("verbs context after listen: %p\n", verbs); | |
2856 | ||
2857 | if (!rdma->verbs) { | |
2858 | rdma->verbs = verbs; | |
2859 | } else if (rdma->verbs != verbs) { | |
2860 | fprintf(stderr, "ibv context not matching %p, %p!\n", | |
2861 | rdma->verbs, verbs); | |
2862 | goto err_rdma_dest_wait; | |
2863 | } | |
2864 | ||
2865 | qemu_rdma_dump_id("dest_init", verbs); | |
2866 | ||
2867 | ret = qemu_rdma_alloc_pd_cq(rdma); | |
2868 | if (ret) { | |
2869 | fprintf(stderr, "rdma migration: error allocating pd and cq!\n"); | |
2870 | goto err_rdma_dest_wait; | |
2871 | } | |
2872 | ||
2873 | ret = qemu_rdma_alloc_qp(rdma); | |
2874 | if (ret) { | |
2875 | fprintf(stderr, "rdma migration: error allocating qp!\n"); | |
2876 | goto err_rdma_dest_wait; | |
2877 | } | |
2878 | ||
2879 | ret = qemu_rdma_init_ram_blocks(rdma); | |
2880 | if (ret) { | |
2881 | fprintf(stderr, "rdma migration: error initializing ram blocks!\n"); | |
2882 | goto err_rdma_dest_wait; | |
2883 | } | |
2884 | ||
1f22364b | 2885 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2886 | ret = qemu_rdma_reg_control(rdma, idx); |
2887 | if (ret) { | |
2888 | fprintf(stderr, "rdma: error registering %d control!\n", idx); | |
2889 | goto err_rdma_dest_wait; | |
2890 | } | |
2891 | } | |
2892 | ||
2893 | qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL); | |
2894 | ||
2895 | ret = rdma_accept(rdma->cm_id, &conn_param); | |
2896 | if (ret) { | |
2897 | fprintf(stderr, "rdma_accept returns %d!\n", ret); | |
2898 | goto err_rdma_dest_wait; | |
2899 | } | |
2900 | ||
2901 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2902 | if (ret) { | |
2903 | fprintf(stderr, "rdma_accept get_cm_event failed %d!\n", ret); | |
2904 | goto err_rdma_dest_wait; | |
2905 | } | |
2906 | ||
2907 | if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { | |
2908 | fprintf(stderr, "rdma_accept not event established!\n"); | |
2909 | rdma_ack_cm_event(cm_event); | |
2910 | goto err_rdma_dest_wait; | |
2911 | } | |
2912 | ||
2913 | rdma_ack_cm_event(cm_event); | |
5a91337c | 2914 | rdma->connected = true; |
2da776db | 2915 | |
87772639 | 2916 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); |
2917 | if (ret) { |
2918 | fprintf(stderr, "rdma migration: error posting second control recv!\n"); | |
2919 | goto err_rdma_dest_wait; | |
2920 | } | |
2921 | ||
2922 | qemu_rdma_dump_gid("dest_connect", rdma->cm_id); | |
2923 | ||
2924 | return 0; | |
2925 | ||
2926 | err_rdma_dest_wait: | |
2927 | rdma->error_state = ret; | |
2928 | qemu_rdma_cleanup(rdma); | |
2929 | return ret; | |
2930 | } | |
2931 | ||
2932 | /* | |
2933 | * During each iteration of the migration, we listen for instructions | |
2934 | * by the source VM to perform dynamic page registrations before they | |
2935 | * can perform RDMA operations. | |
2936 | * | |
2937 | * We respond with the 'rkey'. | |
2938 | * | |
2939 | * Keep doing this until the source tells us to stop. | |
2940 | */ | |
2941 | static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque, | |
2942 | uint64_t flags) | |
2943 | { | |
2944 | RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult), | |
2945 | .type = RDMA_CONTROL_REGISTER_RESULT, | |
2946 | .repeat = 0, | |
2947 | }; | |
2948 | RDMAControlHeader unreg_resp = { .len = 0, | |
2949 | .type = RDMA_CONTROL_UNREGISTER_FINISHED, | |
2950 | .repeat = 0, | |
2951 | }; | |
2952 | RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT, | |
2953 | .repeat = 1 }; | |
2954 | QEMUFileRDMA *rfile = opaque; | |
2955 | RDMAContext *rdma = rfile->rdma; | |
2956 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
2957 | RDMAControlHeader head; | |
2958 | RDMARegister *reg, *registers; | |
2959 | RDMACompress *comp; | |
2960 | RDMARegisterResult *reg_result; | |
2961 | static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE]; | |
2962 | RDMALocalBlock *block; | |
2963 | void *host_addr; | |
2964 | int ret = 0; | |
2965 | int idx = 0; | |
2966 | int count = 0; | |
2967 | int i = 0; | |
2968 | ||
2969 | CHECK_ERROR_STATE(); | |
2970 | ||
2971 | do { | |
2972 | DDDPRINTF("Waiting for next request %" PRIu64 "...\n", flags); | |
2973 | ||
2974 | ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE); | |
2975 | ||
2976 | if (ret < 0) { | |
2977 | break; | |
2978 | } | |
2979 | ||
2980 | if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) { | |
2981 | fprintf(stderr, "rdma: Too many requests in this message (%d)." | |
2982 | "Bailing.\n", head.repeat); | |
2983 | ret = -EIO; | |
2984 | break; | |
2985 | } | |
2986 | ||
2987 | switch (head.type) { | |
2988 | case RDMA_CONTROL_COMPRESS: | |
2989 | comp = (RDMACompress *) rdma->wr_data[idx].control_curr; | |
2990 | network_to_compress(comp); | |
2991 | ||
2992 | DDPRINTF("Zapping zero chunk: %" PRId64 | |
2993 | " bytes, index %d, offset %" PRId64 "\n", | |
2994 | comp->length, comp->block_idx, comp->offset); | |
2995 | block = &(rdma->local_ram_blocks.block[comp->block_idx]); | |
2996 | ||
2997 | host_addr = block->local_host_addr + | |
2998 | (comp->offset - block->offset); | |
2999 | ||
3000 | ram_handle_compressed(host_addr, comp->value, comp->length); | |
3001 | break; | |
3002 | ||
3003 | case RDMA_CONTROL_REGISTER_FINISHED: | |
3004 | DDDPRINTF("Current registrations complete.\n"); | |
3005 | goto out; | |
3006 | ||
3007 | case RDMA_CONTROL_RAM_BLOCKS_REQUEST: | |
3008 | DPRINTF("Initial setup info requested.\n"); | |
3009 | ||
3010 | if (rdma->pin_all) { | |
3011 | ret = qemu_rdma_reg_whole_ram_blocks(rdma); | |
3012 | if (ret) { | |
3013 | fprintf(stderr, "rdma migration: error dest " | |
3014 | "registering ram blocks!\n"); | |
3015 | goto out; | |
3016 | } | |
3017 | } | |
3018 | ||
3019 | /* | |
3020 | * Dest uses this to prepare to transmit the RAMBlock descriptions | |
3021 | * to the source VM after connection setup. | |
3022 | * Both sides use the "remote" structure to communicate and update | |
3023 | * their "local" descriptions with what was sent. | |
3024 | */ | |
3025 | for (i = 0; i < local->nb_blocks; i++) { | |
3026 | rdma->block[i].remote_host_addr = | |
3027 | (uint64_t)(local->block[i].local_host_addr); | |
3028 | ||
3029 | if (rdma->pin_all) { | |
3030 | rdma->block[i].remote_rkey = local->block[i].mr->rkey; | |
3031 | } | |
3032 | ||
3033 | rdma->block[i].offset = local->block[i].offset; | |
3034 | rdma->block[i].length = local->block[i].length; | |
3035 | ||
3036 | remote_block_to_network(&rdma->block[i]); | |
3037 | } | |
3038 | ||
3039 | blocks.len = rdma->local_ram_blocks.nb_blocks | |
3040 | * sizeof(RDMARemoteBlock); | |
3041 | ||
3042 | ||
3043 | ret = qemu_rdma_post_send_control(rdma, | |
3044 | (uint8_t *) rdma->block, &blocks); | |
3045 | ||
3046 | if (ret < 0) { | |
3047 | fprintf(stderr, "rdma migration: error sending remote info!\n"); | |
3048 | goto out; | |
3049 | } | |
3050 | ||
3051 | break; | |
3052 | case RDMA_CONTROL_REGISTER_REQUEST: | |
3053 | DDPRINTF("There are %d registration requests\n", head.repeat); | |
3054 | ||
3055 | reg_resp.repeat = head.repeat; | |
3056 | registers = (RDMARegister *) rdma->wr_data[idx].control_curr; | |
3057 | ||
3058 | for (count = 0; count < head.repeat; count++) { | |
3059 | uint64_t chunk; | |
3060 | uint8_t *chunk_start, *chunk_end; | |
3061 | ||
3062 | reg = ®isters[count]; | |
3063 | network_to_register(reg); | |
3064 | ||
3065 | reg_result = &results[count]; | |
3066 | ||
3067 | DDPRINTF("Registration request (%d): index %d, current_addr %" | |
3068 | PRIu64 " chunks: %" PRIu64 "\n", count, | |
3069 | reg->current_index, reg->key.current_addr, reg->chunks); | |
3070 | ||
3071 | block = &(rdma->local_ram_blocks.block[reg->current_index]); | |
3072 | if (block->is_ram_block) { | |
3073 | host_addr = (block->local_host_addr + | |
3074 | (reg->key.current_addr - block->offset)); | |
3075 | chunk = ram_chunk_index(block->local_host_addr, | |
3076 | (uint8_t *) host_addr); | |
3077 | } else { | |
3078 | chunk = reg->key.chunk; | |
3079 | host_addr = block->local_host_addr + | |
3080 | (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); | |
3081 | } | |
3082 | chunk_start = ram_chunk_start(block, chunk); | |
3083 | chunk_end = ram_chunk_end(block, chunk + reg->chunks); | |
3084 | if (qemu_rdma_register_and_get_keys(rdma, block, | |
3085 | (uint8_t *)host_addr, NULL, ®_result->rkey, | |
3086 | chunk, chunk_start, chunk_end)) { | |
3087 | fprintf(stderr, "cannot get rkey!\n"); | |
3088 | ret = -EINVAL; | |
3089 | goto out; | |
3090 | } | |
3091 | ||
3092 | reg_result->host_addr = (uint64_t) block->local_host_addr; | |
3093 | ||
3094 | DDPRINTF("Registered rkey for this request: %x\n", | |
3095 | reg_result->rkey); | |
3096 | ||
3097 | result_to_network(reg_result); | |
3098 | } | |
3099 | ||
3100 | ret = qemu_rdma_post_send_control(rdma, | |
3101 | (uint8_t *) results, ®_resp); | |
3102 | ||
3103 | if (ret < 0) { | |
3104 | fprintf(stderr, "Failed to send control buffer!\n"); | |
3105 | goto out; | |
3106 | } | |
3107 | break; | |
3108 | case RDMA_CONTROL_UNREGISTER_REQUEST: | |
3109 | DDPRINTF("There are %d unregistration requests\n", head.repeat); | |
3110 | unreg_resp.repeat = head.repeat; | |
3111 | registers = (RDMARegister *) rdma->wr_data[idx].control_curr; | |
3112 | ||
3113 | for (count = 0; count < head.repeat; count++) { | |
3114 | reg = ®isters[count]; | |
3115 | network_to_register(reg); | |
3116 | ||
3117 | DDPRINTF("Unregistration request (%d): " | |
3118 | " index %d, chunk %" PRIu64 "\n", | |
3119 | count, reg->current_index, reg->key.chunk); | |
3120 | ||
3121 | block = &(rdma->local_ram_blocks.block[reg->current_index]); | |
3122 | ||
3123 | ret = ibv_dereg_mr(block->pmr[reg->key.chunk]); | |
3124 | block->pmr[reg->key.chunk] = NULL; | |
3125 | ||
3126 | if (ret != 0) { | |
3127 | perror("rdma unregistration chunk failed"); | |
3128 | ret = -ret; | |
3129 | goto out; | |
3130 | } | |
3131 | ||
3132 | rdma->total_registrations--; | |
3133 | ||
3134 | DDPRINTF("Unregistered chunk %" PRIu64 " successfully.\n", | |
3135 | reg->key.chunk); | |
3136 | } | |
3137 | ||
3138 | ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp); | |
3139 | ||
3140 | if (ret < 0) { | |
3141 | fprintf(stderr, "Failed to send control buffer!\n"); | |
3142 | goto out; | |
3143 | } | |
3144 | break; | |
        case RDMA_CONTROL_REGISTER_RESULT:
            fprintf(stderr, "Invalid RESULT message at dest.\n");
            ret = -EIO;
            goto out;
        default:
            fprintf(stderr, "Unknown control message %s\n",
                    control_desc[head.type]);
            ret = -EIO;
            goto out;
        }
    } while (1);
out:
    if (ret < 0) {
        rdma->error_state = ret;
    }
    return ret;
}
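
/*
 * A minimal, self-contained sketch of the chunk arithmetic the handler
 * above relies on, assuming the 1 MB chunks implied by
 * RDMA_REG_CHUNK_SHIFT. The demo_* helpers are illustrative stand-ins
 * for ram_chunk_index()/ram_chunk_start()/ram_chunk_end(), not QEMU
 * APIs.
 */
static uint64_t demo_chunk_index(const uint8_t *block_start,
                                 const uint8_t *host_addr)
{
    /* Which 1 MB chunk of the block does this host address fall into? */
    return ((uintptr_t) host_addr - (uintptr_t) block_start)
               >> RDMA_REG_CHUNK_SHIFT;
}

static uint8_t *demo_chunk_start(uint8_t *block_start, uint64_t chunk)
{
    return block_start + (chunk << RDMA_REG_CHUNK_SHIFT);
}

static uint8_t *demo_chunk_end(uint8_t *block_start, uint64_t block_len,
                               uint64_t chunk)
{
    /* One byte past the chunk, clamped so a short final chunk stays
     * inside the ram block. */
    uint8_t *end = demo_chunk_start(block_start, chunk + 1);

    if (end > block_start + block_len) {
        end = block_start + block_len;
    }
    return end;
}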

static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
                                        uint64_t flags)
{
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;

    CHECK_ERROR_STATE();

    DDDPRINTF("start section: %" PRIu64 "\n", flags);
    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
    qemu_fflush(f);

    return 0;
}
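
/*
 * For context (a hedged sketch, not code from this file): the
 * RAM_SAVE_FLAG_HOOK written above is consumed by the destination's
 * generic ram_load() loop, which forwards it to the hook_ram_load
 * callback registered in rdma_read_ops below, i.e. to
 * qemu_rdma_registration_handle(). The generic plumbing looks
 * approximately like:
 *
 *     void ram_control_load_hook(QEMUFile *f, uint64_t flags)
 *     {
 *         if (f->ops->hook_ram_load) {
 *             int ret = f->ops->hook_ram_load(f, f->opaque, flags);
 *             if (ret < 0) {
 *                 qemu_file_set_error(f, ret);
 *             }
 *         }
 *     }
 */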

/*
 * Inform the destination that dynamic registrations are done for now.
 * First, flush any outstanding writes.
 */
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
                                       uint64_t flags)
{
    Error *local_err = NULL, **errp = &local_err;
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;
    RDMAControlHeader head = { .len = 0, .repeat = 1 };
    int ret = 0;

    CHECK_ERROR_STATE();

    qemu_fflush(f);
    ret = qemu_rdma_drain_cq(f, rdma);

    if (ret < 0) {
        goto err;
    }

    if (flags == RAM_CONTROL_SETUP) {
        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
        RDMALocalBlocks *local = &rdma->local_ram_blocks;
        int reg_result_idx, i, j, nb_remote_blocks;

        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
        DPRINTF("Sending registration setup for ram blocks...\n");

        /*
         * Make sure that we parallelize the pinning on both sides.
         * For very large guests, doing this serially takes a really
         * long time, so we have to 'interleave' the pinning locally
         * with the control messages by performing the pinning on this
         * side before we receive the control response from the other
         * side that the pinning has completed.
         */
        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
                    &reg_result_idx, rdma->pin_all ?
                    qemu_rdma_reg_whole_ram_blocks : NULL);
        if (ret < 0) {
            ERROR(errp, "receiving remote info!");
            return ret;
        }

        nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock);

        /*
         * The protocol uses two different sets of rkeys (mutually
         * exclusive):
         * 1. One key to represent the virtual address of the entire
         *    ram block. (Dynamic chunk registration disabled: pin
         *    everything with a single rkey.)
         * 2. One key per individual chunk within a ram block.
         *    (Dynamic chunk registration enabled: pin chunks on demand.)
         *
         * Once the capability has been successfully negotiated, the
         * destination transmits the keys to use (or sends them later),
         * including the virtual addresses, and then propagates the
         * remote ram block descriptions to its local copy. (See the
         * illustrative sketch after this function for the registration
         * granularity each mode implies.)
         */

        if (local->nb_blocks != nb_remote_blocks) {
            ERROR(errp, "ram blocks mismatch #1! "
                        "Your QEMU command line parameters are probably "
                        "not identical on both the source and destination.");
            return -EINVAL;
        }

        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
        memcpy(rdma->block,
            rdma->wr_data[reg_result_idx].control_curr, resp.len);
        for (i = 0; i < nb_remote_blocks; i++) {
            network_to_remote_block(&rdma->block[i]);

            /* search local ram blocks */
            for (j = 0; j < local->nb_blocks; j++) {
                if (rdma->block[i].offset != local->block[j].offset) {
                    continue;
                }

                if (rdma->block[i].length != local->block[j].length) {
                    ERROR(errp, "ram blocks mismatch #2! "
                                "Your QEMU command line parameters are "
                                "probably not identical on both the source "
                                "and destination.");
                    return -EINVAL;
                }
                local->block[j].remote_host_addr =
                        rdma->block[i].remote_host_addr;
                local->block[j].remote_rkey = rdma->block[i].remote_rkey;
                break;
            }

            if (j >= local->nb_blocks) {
                ERROR(errp, "ram blocks mismatch #3! "
                            "Your QEMU command line parameters are probably "
                            "not identical on both the source and "
                            "destination.");
                return -EINVAL;
            }
        }
    }

    DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags);

    head.type = RDMA_CONTROL_REGISTER_FINISHED;
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);

    if (ret < 0) {
        goto err;
    }

    return 0;
err:
    rdma->error_state = ret;
    return ret;
}
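
/*
 * Illustrative contrast (hypothetical demo_* helpers, not QEMU code)
 * between the two mutually exclusive registration modes described in
 * the comment above; both boil down to the granularity handed to
 * ibv_reg_mr().
 */
static int demo_pin_whole_block(struct ibv_pd *pd, uint8_t *base,
                                size_t len, uint32_t *rkey)
{
    /* pin_all mode: one memory region (one rkey) spans the whole block */
    struct ibv_mr *mr = ibv_reg_mr(pd, base, len,
                                   IBV_ACCESS_LOCAL_WRITE |
                                   IBV_ACCESS_REMOTE_WRITE);

    if (!mr) {
        return -errno;
    }
    *rkey = mr->rkey;
    return 0;
}

static int demo_pin_single_chunk(struct ibv_pd *pd, uint8_t *chunk_start,
                                 uint8_t *chunk_end, uint32_t *rkey)
{
    /* dynamic mode: a separate region (and rkey) per 1 MB chunk,
     * registered on demand as the chunk is first written */
    struct ibv_mr *mr = ibv_reg_mr(pd, chunk_start,
                                   chunk_end - chunk_start,
                                   IBV_ACCESS_LOCAL_WRITE |
                                   IBV_ACCESS_REMOTE_WRITE);

    if (!mr) {
        return -errno;
    }
    *rkey = mr->rkey;
    return 0;
}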

static int qemu_rdma_get_fd(void *opaque)
{
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;

    return rdma->comp_channel->fd;
}

const QEMUFileOps rdma_read_ops = {
    .get_buffer = qemu_rdma_get_buffer,
    .get_fd = qemu_rdma_get_fd,
    .close = qemu_rdma_close,
    .hook_ram_load = qemu_rdma_registration_handle,
};

const QEMUFileOps rdma_write_ops = {
    .put_buffer = qemu_rdma_put_buffer,
    .close = qemu_rdma_close,
    .before_ram_iterate = qemu_rdma_registration_start,
    .after_ram_iterate = qemu_rdma_registration_stop,
    .save_page = qemu_rdma_save_page,
};
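
/*
 * Hedged sketch of how the write-side hooks above are driven (the
 * exact code lives in the generic migration layer, not in this file):
 * before and after every ram iteration, the save path calls through
 * the ops table, roughly:
 *
 *     void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
 *     {
 *         if (f->ops->before_ram_iterate) {
 *             int ret = f->ops->before_ram_iterate(f, f->opaque, flags);
 *             if (ret < 0) {
 *                 qemu_file_set_error(f, ret);
 *             }
 *         }
 *     }
 *
 * so a QEMUFile opened with rdma_write_ops transparently gains the
 * registration start/stop and save_page behavior defined above.
 */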

static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
{
    QEMUFileRDMA *r;

    /* Validate the mode before allocating, so an invalid mode cannot
     * leak the QEMUFileRDMA structure. */
    if (qemu_file_mode_is_not_valid(mode)) {
        return NULL;
    }

    r = g_malloc0(sizeof(QEMUFileRDMA));
    r->rdma = rdma;

    if (mode[0] == 'w') {
        r->file = qemu_fopen_ops(r, &rdma_write_ops);
    } else {
        r->file = qemu_fopen_ops(r, &rdma_read_ops);
    }

    return r->file;
}

static void rdma_accept_incoming_migration(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    QEMUFile *f;
    Error *local_err = NULL, **errp = &local_err;

    DPRINTF("Accepting rdma connection...\n");
    ret = qemu_rdma_accept(rdma);

    if (ret) {
        ERROR(errp, "RDMA Migration initialization failed!");
        return;
    }

    DPRINTF("Accepted migration\n");

    f = qemu_fopen_rdma(rdma, "rb");
    if (f == NULL) {
        ERROR(errp, "could not qemu_fopen_rdma!");
        qemu_rdma_cleanup(rdma);
        return;
    }

    rdma->migration_started_on_destination = 1;
    process_incoming_migration(f);
}

void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
    RDMAContext *rdma;
    Error *local_err = NULL;

    DPRINTF("Starting RDMA-based incoming migration\n");
    rdma = qemu_rdma_data_init(host_port, &local_err);

    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_dest_init(rdma, &local_err);

    if (ret) {
        goto err;
    }

    DPRINTF("qemu_rdma_dest_init success\n");

    ret = rdma_listen(rdma->listen_id, 5);

    if (ret) {
        ERROR(errp, "listening on socket!");
        goto err;
    }

    DPRINTF("rdma_listen success\n");

    qemu_set_fd_handler2(rdma->channel->fd, NULL,
                         rdma_accept_incoming_migration, NULL,
                         (void *)(intptr_t) rdma);
    return;
err:
    error_propagate(errp, local_err);
    g_free(rdma);
}
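
/*
 * Usage note: rdma_start_incoming_migration() above is reached when
 * the destination is started with "-incoming rdma:host:port";
 * rdma_start_outgoing_migration() below is reached from the monitor
 * command "migrate -d rdma:host:port". The pin-all mode negotiated
 * earlier is optional and is enabled before migrating with
 * "migrate_set_capability rdma-pin-all on" (see docs/rdma.txt).
 */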

void rdma_start_outgoing_migration(void *opaque,
                            const char *host_port, Error **errp)
{
    MigrationState *s = opaque;
    Error *local_err = NULL, **temp = &local_err;
    RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
    int ret = 0;

    if (rdma == NULL) {
        ERROR(temp, "Failed to initialize RDMA data structures!");
        goto err;
    }

    ret = qemu_rdma_source_init(rdma, &local_err,
        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);

    if (ret) {
        goto err;
    }

    DPRINTF("qemu_rdma_source_init success\n");
    ret = qemu_rdma_connect(rdma, &local_err);

    if (ret) {
        goto err;
    }

    DPRINTF("qemu_rdma_connect success\n");

    s->file = qemu_fopen_rdma(rdma, "wb");
    migrate_fd_connect(s);
    return;
err:
    error_propagate(errp, local_err);
    g_free(rdma);
    migrate_fd_error(s);
}