/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "exec/cpu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "block/coroutine.h"
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <string.h>
#include <rdma/rdma_cma.h>
#include "trace.h"

/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)

#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */

/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size of an InfiniBand SEND message.
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
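
/*
 * Editor's sketch (not in the original source): the capability
 * negotiation is a simple bitmask handshake. A receiver that wants to
 * reject unknown flags could check, roughly:
 *
 *     RDMACapabilities cap;
 *     network_to_caps(&cap);                 // defined below
 *     if (cap.flags & ~known_capabilities) {
 *         ERROR(errp, "Unknown capability mask: 0x%x", cap.flags);
 *     }
 *
 * The field names are real, but this exact check is illustrative; the
 * actual negotiation happens later in the connection-setup code.
 */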
76 | ||
77 | #define CHECK_ERROR_STATE() \ | |
78 | do { \ | |
79 | if (rdma->error_state) { \ | |
80 | if (!rdma->error_reported) { \ | |
733252de DDAG |
81 | error_report("RDMA is in an error state waiting migration" \ |
82 | " to abort!"); \ | |
2da776db MH |
83 | rdma->error_reported = 1; \ |
84 | } \ | |
85 | return rdma->error_state; \ | |
86 | } \ | |
87 | } while (0); | |
88 | ||
89 | /* | |
90 | * A work request ID is 64-bits and we split up these bits | |
91 | * into 3 parts: | |
92 | * | |
93 | * bits 0-15 : type of control message, 2^16 | |
94 | * bits 16-29: ram block index, 2^14 | |
95 | * bits 30-63: ram block chunk number, 2^34 | |
96 | * | |
97 | * The last two bit ranges are only used for RDMA writes, | |
98 | * in order to track their completion and potentially | |
99 | * also track unregistration status of the message. | |
100 | */ | |
101 | #define RDMA_WRID_TYPE_SHIFT 0UL | |
102 | #define RDMA_WRID_BLOCK_SHIFT 16UL | |
103 | #define RDMA_WRID_CHUNK_SHIFT 30UL | |
104 | ||
105 | #define RDMA_WRID_TYPE_MASK \ | |
106 | ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL) | |
107 | ||
108 | #define RDMA_WRID_BLOCK_MASK \ | |
109 | (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL)) | |
110 | ||
111 | #define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK) | |
112 | ||
/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};

static const char *wrid_desc[] = {
    [RDMA_WRID_NONE] = "NONE",
    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
};

/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands).
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};

/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};

static const char *control_desc[] = {
    [RDMA_CONTROL_NONE] = "NONE",
    [RDMA_CONTROL_ERROR] = "ERROR",
    [RDMA_CONTROL_READY] = "READY",
    [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
    [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
    [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
    [RDMA_CONTROL_COMPRESS] = "COMPRESS",
    [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
    [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
    [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
    [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
    [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
};

/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;

/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}

/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    char          *block_name;
    uint8_t       *local_host_addr;  /* local virtual address */
    uint64_t       remote_host_addr; /* remote virtual address */
    uint64_t       offset;
    uint64_t       length;
    struct         ibv_mr **pmr;     /* MRs for chunk-level registration */
    struct         ibv_mr *mr;       /* MR for non-chunk-level registration */
    uint32_t      *remote_keys;      /* rkeys for chunk-level registration */
    uint32_t       remote_rkey;      /* rkey for non-chunk-level registration */
    int            index;            /* which block are we */
    bool           is_ram_block;
    int            nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;

/*
 * Also represents a RAMBlock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMADestBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMADestBlock;

static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
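
/*
 * Editor's note: on a little-endian host, htonll(0x0102030405060708ULL)
 * places htonl(0x01020304) in u.lv[0] (the low-address half) and
 * htonl(0x05060708) in u.lv[1], so the value's bytes sit in memory as
 * 01 02 03 04 05 06 07 08 -- network byte order. ntohll() inverts the
 * same union trick, so ntohll(htonll(x)) == x on hosts of either
 * endianness.
 */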
262 | ||
a97270ad | 263 | static void dest_block_to_network(RDMADestBlock *db) |
2da776db | 264 | { |
a97270ad DDAG |
265 | db->remote_host_addr = htonll(db->remote_host_addr); |
266 | db->offset = htonll(db->offset); | |
267 | db->length = htonll(db->length); | |
268 | db->remote_rkey = htonl(db->remote_rkey); | |
2da776db MH |
269 | } |
270 | ||
a97270ad | 271 | static void network_to_dest_block(RDMADestBlock *db) |
2da776db | 272 | { |
a97270ad DDAG |
273 | db->remote_host_addr = ntohll(db->remote_host_addr); |
274 | db->offset = ntohll(db->offset); | |
275 | db->length = ntohll(db->length); | |
276 | db->remote_rkey = ntohl(db->remote_rkey); | |
2da776db MH |
277 | } |
278 | ||
279 | /* | |
280 | * Virtual address of the above structures used for transmitting | |
281 | * the RAMBlock descriptions at connection-time. | |
282 | * This structure is *not* transmitted. | |
283 | */ | |
284 | typedef struct RDMALocalBlocks { | |
285 | int nb_blocks; | |
286 | bool init; /* main memory init complete */ | |
287 | RDMALocalBlock *block; | |
288 | } RDMALocalBlocks; | |
289 | ||
290 | /* | |
291 | * Main data structure for RDMA state. | |
292 | * While there is only one copy of this structure being allocated right now, | |
293 | * this is the place where one would start if you wanted to consider | |
294 | * having more than one RDMA connection open at the same time. | |
295 | */ | |
296 | typedef struct RDMAContext { | |
297 | char *host; | |
298 | int port; | |
299 | ||
1f22364b | 300 | RDMAWorkRequestData wr_data[RDMA_WRID_MAX]; |
2da776db MH |
301 | |
302 | /* | |
303 | * This is used by *_exchange_send() to figure out whether or not | |
304 | * the initial "READY" message has already been received or not. | |
305 | * This is because other functions may potentially poll() and detect | |
306 | * the READY message before send() does, in which case we need to | |
307 | * know if it completed. | |
308 | */ | |
309 | int control_ready_expected; | |
310 | ||
311 | /* number of outstanding writes */ | |
312 | int nb_sent; | |
313 | ||
314 | /* store info about current buffer so that we can | |
315 | merge it with future sends */ | |
316 | uint64_t current_addr; | |
317 | uint64_t current_length; | |
318 | /* index of ram block the current buffer belongs to */ | |
319 | int current_index; | |
320 | /* index of the chunk in the current ram block */ | |
321 | int current_chunk; | |
322 | ||
323 | bool pin_all; | |
324 | ||
325 | /* | |
326 | * infiniband-specific variables for opening the device | |
327 | * and maintaining connection state and so forth. | |
328 | * | |
329 | * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in | |
330 | * cm_id->verbs, cm_id->channel, and cm_id->qp. | |
331 | */ | |
332 | struct rdma_cm_id *cm_id; /* connection manager ID */ | |
333 | struct rdma_cm_id *listen_id; | |
5a91337c | 334 | bool connected; |
2da776db MH |
335 | |
336 | struct ibv_context *verbs; | |
337 | struct rdma_event_channel *channel; | |
338 | struct ibv_qp *qp; /* queue pair */ | |
339 | struct ibv_comp_channel *comp_channel; /* completion channel */ | |
340 | struct ibv_pd *pd; /* protection domain */ | |
341 | struct ibv_cq *cq; /* completion queue */ | |
342 | ||
343 | /* | |
344 | * If a previous write failed (perhaps because of a failed | |
345 | * memory registration, then do not attempt any future work | |
346 | * and remember the error state. | |
347 | */ | |
348 | int error_state; | |
349 | int error_reported; | |
350 | ||
351 | /* | |
352 | * Description of ram blocks used throughout the code. | |
353 | */ | |
354 | RDMALocalBlocks local_ram_blocks; | |
a97270ad | 355 | RDMADestBlock *dest_blocks; |
2da776db MH |
356 | |
357 | /* | |
358 | * Migration on *destination* started. | |
359 | * Then use coroutine yield function. | |
360 | * Source runs in a thread, so we don't care. | |
361 | */ | |
362 | int migration_started_on_destination; | |
363 | ||
364 | int total_registrations; | |
365 | int total_writes; | |
366 | ||
367 | int unregister_current, unregister_next; | |
368 | uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX]; | |
369 | ||
370 | GHashTable *blockmap; | |
371 | } RDMAContext; | |
372 | ||
373 | /* | |
374 | * Interface to the rest of the migration call stack. | |
375 | */ | |
376 | typedef struct QEMUFileRDMA { | |
377 | RDMAContext *rdma; | |
378 | size_t len; | |
379 | void *file; | |
380 | } QEMUFileRDMA; | |
381 | ||
382 | /* | |
383 | * Main structure for IB Send/Recv control messages. | |
384 | * This gets prepended at the beginning of every Send/Recv. | |
385 | */ | |
386 | typedef struct QEMU_PACKED { | |
387 | uint32_t len; /* Total length of data portion */ | |
388 | uint32_t type; /* which control command to perform */ | |
389 | uint32_t repeat; /* number of commands in data portion of same type */ | |
390 | uint32_t padding; | |
391 | } RDMAControlHeader; | |
392 | ||
393 | static void control_to_network(RDMAControlHeader *control) | |
394 | { | |
395 | control->type = htonl(control->type); | |
396 | control->len = htonl(control->len); | |
397 | control->repeat = htonl(control->repeat); | |
398 | } | |
399 | ||
400 | static void network_to_control(RDMAControlHeader *control) | |
401 | { | |
402 | control->type = ntohl(control->type); | |
403 | control->len = ntohl(control->len); | |
404 | control->repeat = ntohl(control->repeat); | |
405 | } | |
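
/*
 * Editor's sketch of the resulting wire format (not in the original
 * source): every control-channel transmission is this packed header
 * followed immediately by 'len' bytes of payload, all in a single IB
 * SEND into the peer's pre-posted RDMA_CONTROL_MAX_BUFFER:
 *
 *     byte:  0         4         8        12        16        16+len
 *            +---------+---------+---------+---------+----------+
 *            |   len   |  type   | repeat  | padding | payload  |
 *            +---------+---------+---------+---------+----------+
 *
 * Header fields travel in network byte order; the payload is
 * byte-swapped separately by the type-specific *_to_network() helpers.
 */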
406 | ||
407 | /* | |
408 | * Register a single Chunk. | |
409 | * Information sent by the source VM to inform the dest | |
410 | * to register an single chunk of memory before we can perform | |
411 | * the actual RDMA operation. | |
412 | */ | |
413 | typedef struct QEMU_PACKED { | |
414 | union QEMU_PACKED { | |
b12f7777 | 415 | uint64_t current_addr; /* offset into the ram_addr_t space */ |
2da776db MH |
416 | uint64_t chunk; /* chunk to lookup if unregistering */ |
417 | } key; | |
418 | uint32_t current_index; /* which ramblock the chunk belongs to */ | |
419 | uint32_t padding; | |
420 | uint64_t chunks; /* how many sequential chunks to register */ | |
421 | } RDMARegister; | |
422 | ||
b12f7777 | 423 | static void register_to_network(RDMAContext *rdma, RDMARegister *reg) |
2da776db | 424 | { |
b12f7777 DDAG |
425 | RDMALocalBlock *local_block; |
426 | local_block = &rdma->local_ram_blocks.block[reg->current_index]; | |
427 | ||
428 | if (local_block->is_ram_block) { | |
429 | /* | |
430 | * current_addr as passed in is an address in the local ram_addr_t | |
431 | * space, we need to translate this for the destination | |
432 | */ | |
433 | reg->key.current_addr -= local_block->offset; | |
434 | reg->key.current_addr += rdma->dest_blocks[reg->current_index].offset; | |
435 | } | |
2da776db MH |
436 | reg->key.current_addr = htonll(reg->key.current_addr); |
437 | reg->current_index = htonl(reg->current_index); | |
438 | reg->chunks = htonll(reg->chunks); | |
439 | } | |
440 | ||
441 | static void network_to_register(RDMARegister *reg) | |
442 | { | |
443 | reg->key.current_addr = ntohll(reg->key.current_addr); | |
444 | reg->current_index = ntohl(reg->current_index); | |
445 | reg->chunks = ntohll(reg->chunks); | |
446 | } | |
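
/*
 * Worked example of the translation above (editor's note, hypothetical
 * numbers): suppose a ram block sits at offset 0x40000000 in the
 * source's ram_addr_t space but at 0x80000000 on the destination. A
 * page at source address 0x40001000 is then advertised as
 * 0x40001000 - 0x40000000 + 0x80000000 = 0x80001000, preserving the
 * offset within the block. The two sides may lay out their blocks
 * differently, so only block-relative offsets are meaningful on the wire.
 */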
447 | ||
448 | typedef struct QEMU_PACKED { | |
449 | uint32_t value; /* if zero, we will madvise() */ | |
450 | uint32_t block_idx; /* which ram block index */ | |
b12f7777 | 451 | uint64_t offset; /* Address in remote ram_addr_t space */ |
2da776db MH |
452 | uint64_t length; /* length of the chunk */ |
453 | } RDMACompress; | |
454 | ||
b12f7777 | 455 | static void compress_to_network(RDMAContext *rdma, RDMACompress *comp) |
2da776db MH |
456 | { |
457 | comp->value = htonl(comp->value); | |
b12f7777 DDAG |
458 | /* |
459 | * comp->offset as passed in is an address in the local ram_addr_t | |
460 | * space, we need to translate this for the destination | |
461 | */ | |
462 | comp->offset -= rdma->local_ram_blocks.block[comp->block_idx].offset; | |
463 | comp->offset += rdma->dest_blocks[comp->block_idx].offset; | |
2da776db MH |
464 | comp->block_idx = htonl(comp->block_idx); |
465 | comp->offset = htonll(comp->offset); | |
466 | comp->length = htonll(comp->length); | |
467 | } | |
468 | ||
469 | static void network_to_compress(RDMACompress *comp) | |
470 | { | |
471 | comp->value = ntohl(comp->value); | |
472 | comp->block_idx = ntohl(comp->block_idx); | |
473 | comp->offset = ntohll(comp->offset); | |
474 | comp->length = ntohll(comp->length); | |
475 | } | |
476 | ||
477 | /* | |
478 | * The result of the dest's memory registration produces an "rkey" | |
479 | * which the source VM must reference in order to perform | |
480 | * the RDMA operation. | |
481 | */ | |
482 | typedef struct QEMU_PACKED { | |
483 | uint32_t rkey; | |
484 | uint32_t padding; | |
485 | uint64_t host_addr; | |
486 | } RDMARegisterResult; | |
487 | ||
488 | static void result_to_network(RDMARegisterResult *result) | |
489 | { | |
490 | result->rkey = htonl(result->rkey); | |
491 | result->host_addr = htonll(result->host_addr); | |
492 | }; | |
493 | ||
494 | static void network_to_result(RDMARegisterResult *result) | |
495 | { | |
496 | result->rkey = ntohl(result->rkey); | |
497 | result->host_addr = ntohll(result->host_addr); | |
498 | }; | |
499 | ||
500 | const char *print_wrid(int wrid); | |
501 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, | |
502 | uint8_t *data, RDMAControlHeader *resp, | |
503 | int *resp_idx, | |
504 | int (*callback)(RDMAContext *rdma)); | |
505 | ||
dd286ed7 IY |
506 | static inline uint64_t ram_chunk_index(const uint8_t *start, |
507 | const uint8_t *host) | |
2da776db MH |
508 | { |
509 | return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT; | |
510 | } | |
511 | ||
dd286ed7 | 512 | static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block, |
2da776db MH |
513 | uint64_t i) |
514 | { | |
fbce8c25 SW |
515 | return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr + |
516 | (i << RDMA_REG_CHUNK_SHIFT)); | |
2da776db MH |
517 | } |
518 | ||
dd286ed7 IY |
519 | static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block, |
520 | uint64_t i) | |
2da776db MH |
521 | { |
522 | uint8_t *result = ram_chunk_start(rdma_ram_block, i) + | |
523 | (1UL << RDMA_REG_CHUNK_SHIFT); | |
524 | ||
525 | if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) { | |
526 | result = rdma_ram_block->local_host_addr + rdma_ram_block->length; | |
527 | } | |
528 | ||
529 | return result; | |
530 | } | |
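
/*
 * Worked example (editor's note): with RDMA_REG_CHUNK_SHIFT == 20, the
 * chunk size is 1 MB. For a block of length 5 MB + 1 byte starting at
 * host address H, ram_chunk_index(H, H + length) == 5, so
 * rdma_add_block() below sizes it at 5 + 1 == 6 chunks; chunks 0-4 are
 * full 1 MB chunks, and ram_chunk_end() clamps the final partial chunk
 * to H + length rather than the next 1 MB boundary.
 */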
531 | ||
4fb5364b DDAG |
532 | static int rdma_add_block(RDMAContext *rdma, const char *block_name, |
533 | void *host_addr, | |
2da776db MH |
534 | ram_addr_t block_offset, uint64_t length) |
535 | { | |
536 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
537 | RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, | |
fbce8c25 | 538 | (void *)(uintptr_t)block_offset); |
2da776db MH |
539 | RDMALocalBlock *old = local->block; |
540 | ||
541 | assert(block == NULL); | |
542 | ||
543 | local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1)); | |
544 | ||
545 | if (local->nb_blocks) { | |
546 | int x; | |
547 | ||
548 | for (x = 0; x < local->nb_blocks; x++) { | |
fbce8c25 SW |
549 | g_hash_table_remove(rdma->blockmap, |
550 | (void *)(uintptr_t)old[x].offset); | |
551 | g_hash_table_insert(rdma->blockmap, | |
552 | (void *)(uintptr_t)old[x].offset, | |
553 | &local->block[x]); | |
2da776db MH |
554 | } |
555 | memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks); | |
556 | g_free(old); | |
557 | } | |
558 | ||
559 | block = &local->block[local->nb_blocks]; | |
560 | ||
4fb5364b | 561 | block->block_name = g_strdup(block_name); |
2da776db MH |
562 | block->local_host_addr = host_addr; |
563 | block->offset = block_offset; | |
564 | block->length = length; | |
565 | block->index = local->nb_blocks; | |
566 | block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL; | |
567 | block->transit_bitmap = bitmap_new(block->nb_chunks); | |
568 | bitmap_clear(block->transit_bitmap, 0, block->nb_chunks); | |
569 | block->unregister_bitmap = bitmap_new(block->nb_chunks); | |
570 | bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks); | |
571 | block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t)); | |
572 | ||
573 | block->is_ram_block = local->init ? false : true; | |
574 | ||
575 | g_hash_table_insert(rdma->blockmap, (void *) block_offset, block); | |
576 | ||
4fb5364b DDAG |
577 | trace_rdma_add_block(block_name, local->nb_blocks, |
578 | (uintptr_t) block->local_host_addr, | |
ba795761 | 579 | block->offset, block->length, |
fbce8c25 | 580 | (uintptr_t) (block->local_host_addr + block->length), |
ba795761 DDAG |
581 | BITS_TO_LONGS(block->nb_chunks) * |
582 | sizeof(unsigned long) * 8, | |
583 | block->nb_chunks); | |
2da776db MH |
584 | |
585 | local->nb_blocks++; | |
586 | ||
587 | return 0; | |
588 | } | |
589 | ||
590 | /* | |
591 | * Memory regions need to be registered with the device and queue pairs setup | |
592 | * in advanced before the migration starts. This tells us where the RAM blocks | |
593 | * are so that we can register them individually. | |
594 | */ | |
e3807054 | 595 | static int qemu_rdma_init_one_block(const char *block_name, void *host_addr, |
2da776db MH |
596 | ram_addr_t block_offset, ram_addr_t length, void *opaque) |
597 | { | |
4fb5364b | 598 | return rdma_add_block(opaque, block_name, host_addr, block_offset, length); |
2da776db MH |
599 | } |
600 | ||
601 | /* | |
602 | * Identify the RAMBlocks and their quantity. They will be references to | |
603 | * identify chunk boundaries inside each RAMBlock and also be referenced | |
604 | * during dynamic page registration. | |
605 | */ | |
606 | static int qemu_rdma_init_ram_blocks(RDMAContext *rdma) | |
607 | { | |
608 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
609 | ||
610 | assert(rdma->blockmap == NULL); | |
611 | rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal); | |
612 | memset(local, 0, sizeof *local); | |
613 | qemu_ram_foreach_block(qemu_rdma_init_one_block, rdma); | |
733252de | 614 | trace_qemu_rdma_init_ram_blocks(local->nb_blocks); |
a97270ad | 615 | rdma->dest_blocks = (RDMADestBlock *) g_malloc0(sizeof(RDMADestBlock) * |
2da776db MH |
616 | rdma->local_ram_blocks.nb_blocks); |
617 | local->init = true; | |
618 | return 0; | |
619 | } | |
620 | ||
ba795761 | 621 | static int rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset) |
2da776db MH |
622 | { |
623 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
624 | RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, | |
625 | (void *) block_offset); | |
626 | RDMALocalBlock *old = local->block; | |
627 | int x; | |
628 | ||
629 | assert(block); | |
630 | ||
631 | if (block->pmr) { | |
632 | int j; | |
633 | ||
634 | for (j = 0; j < block->nb_chunks; j++) { | |
635 | if (!block->pmr[j]) { | |
636 | continue; | |
637 | } | |
638 | ibv_dereg_mr(block->pmr[j]); | |
639 | rdma->total_registrations--; | |
640 | } | |
641 | g_free(block->pmr); | |
642 | block->pmr = NULL; | |
643 | } | |
644 | ||
645 | if (block->mr) { | |
646 | ibv_dereg_mr(block->mr); | |
647 | rdma->total_registrations--; | |
648 | block->mr = NULL; | |
649 | } | |
650 | ||
651 | g_free(block->transit_bitmap); | |
652 | block->transit_bitmap = NULL; | |
653 | ||
654 | g_free(block->unregister_bitmap); | |
655 | block->unregister_bitmap = NULL; | |
656 | ||
657 | g_free(block->remote_keys); | |
658 | block->remote_keys = NULL; | |
659 | ||
4fb5364b DDAG |
660 | g_free(block->block_name); |
661 | block->block_name = NULL; | |
662 | ||
2da776db | 663 | for (x = 0; x < local->nb_blocks; x++) { |
fbce8c25 | 664 | g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)old[x].offset); |
2da776db MH |
665 | } |
666 | ||
667 | if (local->nb_blocks > 1) { | |
668 | ||
669 | local->block = g_malloc0(sizeof(RDMALocalBlock) * | |
670 | (local->nb_blocks - 1)); | |
671 | ||
672 | if (block->index) { | |
673 | memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index); | |
674 | } | |
675 | ||
676 | if (block->index < (local->nb_blocks - 1)) { | |
677 | memcpy(local->block + block->index, old + (block->index + 1), | |
678 | sizeof(RDMALocalBlock) * | |
679 | (local->nb_blocks - (block->index + 1))); | |
680 | } | |
681 | } else { | |
682 | assert(block == local->block); | |
683 | local->block = NULL; | |
684 | } | |
685 | ||
ba795761 | 686 | trace_rdma_delete_block(local->nb_blocks, |
fbce8c25 | 687 | (uintptr_t)block->local_host_addr, |
733252de | 688 | block->offset, block->length, |
fbce8c25 | 689 | (uintptr_t)(block->local_host_addr + block->length), |
733252de DDAG |
690 | BITS_TO_LONGS(block->nb_chunks) * |
691 | sizeof(unsigned long) * 8, block->nb_chunks); | |
2da776db MH |
692 | |
693 | g_free(old); | |
694 | ||
695 | local->nb_blocks--; | |
696 | ||
697 | if (local->nb_blocks) { | |
698 | for (x = 0; x < local->nb_blocks; x++) { | |
fbce8c25 SW |
699 | g_hash_table_insert(rdma->blockmap, |
700 | (void *)(uintptr_t)local->block[x].offset, | |
701 | &local->block[x]); | |
2da776db MH |
702 | } |
703 | } | |
704 | ||
705 | return 0; | |
706 | } | |
707 | ||
708 | /* | |
709 | * Put in the log file which RDMA device was opened and the details | |
710 | * associated with that device. | |
711 | */ | |
712 | static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs) | |
713 | { | |
7fc5b13f MH |
714 | struct ibv_port_attr port; |
715 | ||
716 | if (ibv_query_port(verbs, 1, &port)) { | |
733252de | 717 | error_report("Failed to query port information"); |
7fc5b13f MH |
718 | return; |
719 | } | |
720 | ||
2da776db MH |
721 | printf("%s RDMA Device opened: kernel name %s " |
722 | "uverbs device name %s, " | |
7fc5b13f MH |
723 | "infiniband_verbs class device path %s, " |
724 | "infiniband class device path %s, " | |
725 | "transport: (%d) %s\n", | |
2da776db MH |
726 | who, |
727 | verbs->device->name, | |
728 | verbs->device->dev_name, | |
729 | verbs->device->dev_path, | |
7fc5b13f MH |
730 | verbs->device->ibdev_path, |
731 | port.link_layer, | |
732 | (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" : | |
02942db7 | 733 | ((port.link_layer == IBV_LINK_LAYER_ETHERNET) |
7fc5b13f | 734 | ? "Ethernet" : "Unknown")); |
2da776db MH |
735 | } |
736 | ||
737 | /* | |
738 | * Put in the log file the RDMA gid addressing information, | |
739 | * useful for folks who have trouble understanding the | |
740 | * RDMA device hierarchy in the kernel. | |
741 | */ | |
742 | static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id) | |
743 | { | |
744 | char sgid[33]; | |
745 | char dgid[33]; | |
746 | inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid); | |
747 | inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid); | |
733252de | 748 | trace_qemu_rdma_dump_gid(who, sgid, dgid); |
2da776db MH |
749 | } |
750 | ||
7fc5b13f MH |
751 | /* |
752 | * As of now, IPv6 over RoCE / iWARP is not supported by linux. | |
753 | * We will try the next addrinfo struct, and fail if there are | |
754 | * no other valid addresses to bind against. | |
755 | * | |
756 | * If user is listening on '[::]', then we will not have a opened a device | |
757 | * yet and have no way of verifying if the device is RoCE or not. | |
758 | * | |
759 | * In this case, the source VM will throw an error for ALL types of | |
760 | * connections (both IPv4 and IPv6) if the destination machine does not have | |
761 | * a regular infiniband network available for use. | |
762 | * | |
4c293dc6 | 763 | * The only way to guarantee that an error is thrown for broken kernels is |
7fc5b13f MH |
764 | * for the management software to choose a *specific* interface at bind time |
765 | * and validate what time of hardware it is. | |
766 | * | |
767 | * Unfortunately, this puts the user in a fix: | |
02942db7 | 768 | * |
7fc5b13f MH |
769 | * If the source VM connects with an IPv4 address without knowing that the |
770 | * destination has bound to '[::]' the migration will unconditionally fail | |
771 | * unless the management software is explicitly listening on the the IPv4 | |
772 | * address while using a RoCE-based device. | |
773 | * | |
774 | * If the source VM connects with an IPv6 address, then we're OK because we can | |
775 | * throw an error on the source (and similarly on the destination). | |
02942db7 | 776 | * |
7fc5b13f MH |
777 | * But in mixed environments, this will be broken for a while until it is fixed |
778 | * inside linux. | |
779 | * | |
780 | * We do provide a *tiny* bit of help in this function: We can list all of the | |
781 | * devices in the system and check to see if all the devices are RoCE or | |
02942db7 | 782 | * Infiniband. |
7fc5b13f MH |
783 | * |
784 | * If we detect that we have a *pure* RoCE environment, then we can safely | |
4c293dc6 | 785 | * thrown an error even if the management software has specified '[::]' as the |
7fc5b13f MH |
786 | * bind address. |
787 | * | |
788 | * However, if there is are multiple hetergeneous devices, then we cannot make | |
789 | * this assumption and the user just has to be sure they know what they are | |
790 | * doing. | |
791 | * | |
792 | * Patches are being reviewed on linux-rdma. | |
793 | */ | |
794 | static int qemu_rdma_broken_ipv6_kernel(Error **errp, struct ibv_context *verbs) | |
795 | { | |
796 | struct ibv_port_attr port_attr; | |
797 | ||
798 | /* This bug only exists in linux, to our knowledge. */ | |
799 | #ifdef CONFIG_LINUX | |
800 | ||
02942db7 | 801 | /* |
7fc5b13f | 802 | * Verbs are only NULL if management has bound to '[::]'. |
02942db7 | 803 | * |
7fc5b13f MH |
804 | * Let's iterate through all the devices and see if there any pure IB |
805 | * devices (non-ethernet). | |
02942db7 | 806 | * |
7fc5b13f | 807 | * If not, then we can safely proceed with the migration. |
4c293dc6 | 808 | * Otherwise, there are no guarantees until the bug is fixed in linux. |
7fc5b13f MH |
809 | */ |
810 | if (!verbs) { | |
02942db7 | 811 | int num_devices, x; |
7fc5b13f MH |
812 | struct ibv_device ** dev_list = ibv_get_device_list(&num_devices); |
813 | bool roce_found = false; | |
814 | bool ib_found = false; | |
815 | ||
816 | for (x = 0; x < num_devices; x++) { | |
817 | verbs = ibv_open_device(dev_list[x]); | |
5b61d575 PR |
818 | if (!verbs) { |
819 | if (errno == EPERM) { | |
820 | continue; | |
821 | } else { | |
822 | return -EINVAL; | |
823 | } | |
824 | } | |
7fc5b13f MH |
825 | |
826 | if (ibv_query_port(verbs, 1, &port_attr)) { | |
827 | ibv_close_device(verbs); | |
828 | ERROR(errp, "Could not query initial IB port"); | |
829 | return -EINVAL; | |
830 | } | |
831 | ||
832 | if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) { | |
833 | ib_found = true; | |
834 | } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) { | |
835 | roce_found = true; | |
836 | } | |
837 | ||
838 | ibv_close_device(verbs); | |
839 | ||
840 | } | |
841 | ||
842 | if (roce_found) { | |
843 | if (ib_found) { | |
844 | fprintf(stderr, "WARN: migrations may fail:" | |
845 | " IPv6 over RoCE / iWARP in linux" | |
846 | " is broken. But since you appear to have a" | |
847 | " mixed RoCE / IB environment, be sure to only" | |
848 | " migrate over the IB fabric until the kernel " | |
849 | " fixes the bug.\n"); | |
850 | } else { | |
851 | ERROR(errp, "You only have RoCE / iWARP devices in your systems" | |
852 | " and your management software has specified '[::]'" | |
853 | ", but IPv6 over RoCE / iWARP is not supported in Linux."); | |
854 | return -ENONET; | |
855 | } | |
856 | } | |
857 | ||
858 | return 0; | |
859 | } | |
860 | ||
861 | /* | |
862 | * If we have a verbs context, that means that some other than '[::]' was | |
02942db7 SW |
863 | * used by the management software for binding. In which case we can |
864 | * actually warn the user about a potentially broken kernel. | |
7fc5b13f MH |
865 | */ |
866 | ||
867 | /* IB ports start with 1, not 0 */ | |
868 | if (ibv_query_port(verbs, 1, &port_attr)) { | |
869 | ERROR(errp, "Could not query initial IB port"); | |
870 | return -EINVAL; | |
871 | } | |
872 | ||
873 | if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) { | |
874 | ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 " | |
875 | "(but patches on linux-rdma in progress)"); | |
876 | return -ENONET; | |
877 | } | |
878 | ||
879 | #endif | |
880 | ||
881 | return 0; | |
882 | } | |
883 | ||
/*
 * Figure out which RDMA device corresponds to the requested IP hostname.
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(errp, rdma->cm_id->verbs);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}

/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        error_report("failed to allocate completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        error_report("failed to allocate completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;
}
1044 | ||
1045 | /* | |
1046 | * Create queue pairs. | |
1047 | */ | |
1048 | static int qemu_rdma_alloc_qp(RDMAContext *rdma) | |
1049 | { | |
1050 | struct ibv_qp_init_attr attr = { 0 }; | |
1051 | int ret; | |
1052 | ||
1053 | attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX; | |
1054 | attr.cap.max_recv_wr = 3; | |
1055 | attr.cap.max_send_sge = 1; | |
1056 | attr.cap.max_recv_sge = 1; | |
1057 | attr.send_cq = rdma->cq; | |
1058 | attr.recv_cq = rdma->cq; | |
1059 | attr.qp_type = IBV_QPT_RC; | |
1060 | ||
1061 | ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr); | |
1062 | if (ret) { | |
1063 | return -1; | |
1064 | } | |
1065 | ||
1066 | rdma->qp = rdma->cm_id->qp; | |
1067 | return 0; | |
1068 | } | |
1069 | ||
1070 | static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma) | |
1071 | { | |
1072 | int i; | |
1073 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
1074 | ||
1075 | for (i = 0; i < local->nb_blocks; i++) { | |
1076 | local->block[i].mr = | |
1077 | ibv_reg_mr(rdma->pd, | |
1078 | local->block[i].local_host_addr, | |
1079 | local->block[i].length, | |
1080 | IBV_ACCESS_LOCAL_WRITE | | |
1081 | IBV_ACCESS_REMOTE_WRITE | |
1082 | ); | |
1083 | if (!local->block[i].mr) { | |
1084 | perror("Failed to register local dest ram block!\n"); | |
1085 | break; | |
1086 | } | |
1087 | rdma->total_registrations++; | |
1088 | } | |
1089 | ||
1090 | if (i >= local->nb_blocks) { | |
1091 | return 0; | |
1092 | } | |
1093 | ||
1094 | for (i--; i >= 0; i--) { | |
1095 | ibv_dereg_mr(local->block[i].mr); | |
1096 | rdma->total_registrations--; | |
1097 | } | |
1098 | ||
1099 | return -1; | |
1100 | ||
1101 | } | |
1102 | ||
1103 | /* | |
1104 | * Find the ram block that corresponds to the page requested to be | |
1105 | * transmitted by QEMU. | |
1106 | * | |
1107 | * Once the block is found, also identify which 'chunk' within that | |
1108 | * block that the page belongs to. | |
1109 | * | |
1110 | * This search cannot fail or the migration will fail. | |
1111 | */ | |
1112 | static int qemu_rdma_search_ram_block(RDMAContext *rdma, | |
fbce8c25 | 1113 | uintptr_t block_offset, |
2da776db MH |
1114 | uint64_t offset, |
1115 | uint64_t length, | |
1116 | uint64_t *block_index, | |
1117 | uint64_t *chunk_index) | |
1118 | { | |
1119 | uint64_t current_addr = block_offset + offset; | |
1120 | RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, | |
1121 | (void *) block_offset); | |
1122 | assert(block); | |
1123 | assert(current_addr >= block->offset); | |
1124 | assert((current_addr + length) <= (block->offset + block->length)); | |
1125 | ||
1126 | *block_index = block->index; | |
1127 | *chunk_index = ram_chunk_index(block->local_host_addr, | |
1128 | block->local_host_addr + (current_addr - block->offset)); | |
1129 | ||
1130 | return 0; | |
1131 | } | |
1132 | ||
1133 | /* | |
1134 | * Register a chunk with IB. If the chunk was already registered | |
1135 | * previously, then skip. | |
1136 | * | |
1137 | * Also return the keys associated with the registration needed | |
1138 | * to perform the actual RDMA operation. | |
1139 | */ | |
1140 | static int qemu_rdma_register_and_get_keys(RDMAContext *rdma, | |
3ac040c0 | 1141 | RDMALocalBlock *block, uintptr_t host_addr, |
2da776db MH |
1142 | uint32_t *lkey, uint32_t *rkey, int chunk, |
1143 | uint8_t *chunk_start, uint8_t *chunk_end) | |
1144 | { | |
1145 | if (block->mr) { | |
1146 | if (lkey) { | |
1147 | *lkey = block->mr->lkey; | |
1148 | } | |
1149 | if (rkey) { | |
1150 | *rkey = block->mr->rkey; | |
1151 | } | |
1152 | return 0; | |
1153 | } | |
1154 | ||
1155 | /* allocate memory to store chunk MRs */ | |
1156 | if (!block->pmr) { | |
1157 | block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *)); | |
2da776db MH |
1158 | } |
1159 | ||
1160 | /* | |
1161 | * If 'rkey', then we're the destination, so grant access to the source. | |
1162 | * | |
1163 | * If 'lkey', then we're the source VM, so grant access only to ourselves. | |
1164 | */ | |
1165 | if (!block->pmr[chunk]) { | |
1166 | uint64_t len = chunk_end - chunk_start; | |
1167 | ||
733252de | 1168 | trace_qemu_rdma_register_and_get_keys(len, chunk_start); |
2da776db MH |
1169 | |
1170 | block->pmr[chunk] = ibv_reg_mr(rdma->pd, | |
1171 | chunk_start, len, | |
1172 | (rkey ? (IBV_ACCESS_LOCAL_WRITE | | |
1173 | IBV_ACCESS_REMOTE_WRITE) : 0)); | |
1174 | ||
1175 | if (!block->pmr[chunk]) { | |
1176 | perror("Failed to register chunk!"); | |
1177 | fprintf(stderr, "Chunk details: block: %d chunk index %d" | |
3ac040c0 SW |
1178 | " start %" PRIuPTR " end %" PRIuPTR |
1179 | " host %" PRIuPTR | |
1180 | " local %" PRIuPTR " registrations: %d\n", | |
1181 | block->index, chunk, (uintptr_t)chunk_start, | |
1182 | (uintptr_t)chunk_end, host_addr, | |
1183 | (uintptr_t)block->local_host_addr, | |
2da776db MH |
1184 | rdma->total_registrations); |
1185 | return -1; | |
1186 | } | |
1187 | rdma->total_registrations++; | |
1188 | } | |
1189 | ||
1190 | if (lkey) { | |
1191 | *lkey = block->pmr[chunk]->lkey; | |
1192 | } | |
1193 | if (rkey) { | |
1194 | *rkey = block->pmr[chunk]->rkey; | |
1195 | } | |
1196 | return 0; | |
1197 | } | |
1198 | ||
1199 | /* | |
1200 | * Register (at connection time) the memory used for control | |
1201 | * channel messages. | |
1202 | */ | |
1203 | static int qemu_rdma_reg_control(RDMAContext *rdma, int idx) | |
1204 | { | |
1205 | rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd, | |
1206 | rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER, | |
1207 | IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); | |
1208 | if (rdma->wr_data[idx].control_mr) { | |
1209 | rdma->total_registrations++; | |
1210 | return 0; | |
1211 | } | |
733252de | 1212 | error_report("qemu_rdma_reg_control failed"); |
2da776db MH |
1213 | return -1; |
1214 | } | |
1215 | ||
1216 | const char *print_wrid(int wrid) | |
1217 | { | |
1218 | if (wrid >= RDMA_WRID_RECV_CONTROL) { | |
1219 | return wrid_desc[RDMA_WRID_RECV_CONTROL]; | |
1220 | } | |
1221 | return wrid_desc[wrid]; | |
1222 | } | |
1223 | ||
1224 | /* | |
1225 | * RDMA requires memory registration (mlock/pinning), but this is not good for | |
1226 | * overcommitment. | |
1227 | * | |
1228 | * In preparation for the future where LRU information or workload-specific | |
1229 | * writable writable working set memory access behavior is available to QEMU | |
1230 | * it would be nice to have in place the ability to UN-register/UN-pin | |
1231 | * particular memory regions from the RDMA hardware when it is determine that | |
1232 | * those regions of memory will likely not be accessed again in the near future. | |
1233 | * | |
1234 | * While we do not yet have such information right now, the following | |
1235 | * compile-time option allows us to perform a non-optimized version of this | |
1236 | * behavior. | |
1237 | * | |
1238 | * By uncommenting this option, you will cause *all* RDMA transfers to be | |
1239 | * unregistered immediately after the transfer completes on both sides of the | |
1240 | * connection. This has no effect in 'rdma-pin-all' mode, only regular mode. | |
1241 | * | |
1242 | * This will have a terrible impact on migration performance, so until future | |
1243 | * workload information or LRU information is available, do not attempt to use | |
1244 | * this feature except for basic testing. | |
1245 | */ | |
1246 | //#define RDMA_UNREGISTRATION_EXAMPLE | |
1247 | ||
1248 | /* | |
1249 | * Perform a non-optimized memory unregistration after every transfer | |
24ec68ef | 1250 | * for demonstration purposes, only if pin-all is not requested. |
2da776db MH |
1251 | * |
1252 | * Potential optimizations: | |
1253 | * 1. Start a new thread to run this function continuously | |
1254 | - for bit clearing | |
1255 | - and for receipt of unregister messages | |
1256 | * 2. Use an LRU. | |
1257 | * 3. Use workload hints. | |
1258 | */ | |
1259 | static int qemu_rdma_unregister_waiting(RDMAContext *rdma) | |
1260 | { | |
1261 | while (rdma->unregistrations[rdma->unregister_current]) { | |
1262 | int ret; | |
1263 | uint64_t wr_id = rdma->unregistrations[rdma->unregister_current]; | |
1264 | uint64_t chunk = | |
1265 | (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT; | |
1266 | uint64_t index = | |
1267 | (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT; | |
1268 | RDMALocalBlock *block = | |
1269 | &(rdma->local_ram_blocks.block[index]); | |
1270 | RDMARegister reg = { .current_index = index }; | |
1271 | RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED, | |
1272 | }; | |
1273 | RDMAControlHeader head = { .len = sizeof(RDMARegister), | |
1274 | .type = RDMA_CONTROL_UNREGISTER_REQUEST, | |
1275 | .repeat = 1, | |
1276 | }; | |
1277 | ||
733252de DDAG |
1278 | trace_qemu_rdma_unregister_waiting_proc(chunk, |
1279 | rdma->unregister_current); | |
2da776db MH |
1280 | |
1281 | rdma->unregistrations[rdma->unregister_current] = 0; | |
1282 | rdma->unregister_current++; | |
1283 | ||
1284 | if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) { | |
1285 | rdma->unregister_current = 0; | |
1286 | } | |
1287 | ||
1288 | ||
1289 | /* | |
1290 | * Unregistration is speculative (because migration is single-threaded | |
1291 | * and we cannot break the protocol's inifinband message ordering). | |
1292 | * Thus, if the memory is currently being used for transmission, | |
1293 | * then abort the attempt to unregister and try again | |
1294 | * later the next time a completion is received for this memory. | |
1295 | */ | |
1296 | clear_bit(chunk, block->unregister_bitmap); | |
1297 | ||
1298 | if (test_bit(chunk, block->transit_bitmap)) { | |
733252de | 1299 | trace_qemu_rdma_unregister_waiting_inflight(chunk); |
2da776db MH |
1300 | continue; |
1301 | } | |
1302 | ||
733252de | 1303 | trace_qemu_rdma_unregister_waiting_send(chunk); |
2da776db MH |
1304 | |
1305 | ret = ibv_dereg_mr(block->pmr[chunk]); | |
1306 | block->pmr[chunk] = NULL; | |
1307 | block->remote_keys[chunk] = 0; | |
1308 | ||
1309 | if (ret != 0) { | |
1310 | perror("unregistration chunk failed"); | |
1311 | return -ret; | |
1312 | } | |
1313 | rdma->total_registrations--; | |
1314 | ||
1315 | reg.key.chunk = chunk; | |
b12f7777 | 1316 | register_to_network(rdma, ®); |
2da776db MH |
1317 | ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, |
1318 | &resp, NULL, NULL); | |
1319 | if (ret < 0) { | |
1320 | return ret; | |
1321 | } | |
1322 | ||
733252de | 1323 | trace_qemu_rdma_unregister_waiting_complete(chunk); |
2da776db MH |
1324 | } |
1325 | ||
1326 | return 0; | |
1327 | } | |
1328 | ||
1329 | static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index, | |
1330 | uint64_t chunk) | |
1331 | { | |
1332 | uint64_t result = wr_id & RDMA_WRID_TYPE_MASK; | |
1333 | ||
1334 | result |= (index << RDMA_WRID_BLOCK_SHIFT); | |
1335 | result |= (chunk << RDMA_WRID_CHUNK_SHIFT); | |
1336 | ||
1337 | return result; | |
1338 | } | |
1339 | ||
1340 | /* | |
1341 | * Set bit for unregistration in the next iteration. | |
1342 | * We cannot transmit right here, but will unpin later. | |
1343 | */ | |
1344 | static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index, | |
1345 | uint64_t chunk, uint64_t wr_id) | |
1346 | { | |
1347 | if (rdma->unregistrations[rdma->unregister_next] != 0) { | |
733252de | 1348 | error_report("rdma migration: queue is full"); |
2da776db MH |
1349 | } else { |
1350 | RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); | |
1351 | ||
1352 | if (!test_and_set_bit(chunk, block->unregister_bitmap)) { | |
733252de DDAG |
1353 | trace_qemu_rdma_signal_unregister_append(chunk, |
1354 | rdma->unregister_next); | |
2da776db MH |
1355 | |
1356 | rdma->unregistrations[rdma->unregister_next++] = | |
1357 | qemu_rdma_make_wrid(wr_id, index, chunk); | |
1358 | ||
1359 | if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) { | |
1360 | rdma->unregister_next = 0; | |
1361 | } | |
1362 | } else { | |
733252de | 1363 | trace_qemu_rdma_signal_unregister_already(chunk); |
2da776db MH |
1364 | } |
1365 | } | |
1366 | } | |
1367 | ||
1368 | /* | |
1369 | * Consult the connection manager to see a work request | |
1370 | * (of any kind) has completed. | |
1371 | * Return the work request ID that completed. | |
1372 | */ | |
88571882 IY |
1373 | static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, |
1374 | uint32_t *byte_len) | |
2da776db MH |
1375 | { |
1376 | int ret; | |
1377 | struct ibv_wc wc; | |
1378 | uint64_t wr_id; | |
1379 | ||
1380 | ret = ibv_poll_cq(rdma->cq, 1, &wc); | |
1381 | ||
1382 | if (!ret) { | |
1383 | *wr_id_out = RDMA_WRID_NONE; | |
1384 | return 0; | |
1385 | } | |
1386 | ||
1387 | if (ret < 0) { | |
733252de | 1388 | error_report("ibv_poll_cq return %d", ret); |
2da776db MH |
1389 | return ret; |
1390 | } | |
1391 | ||
1392 | wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK; | |
1393 | ||
1394 | if (wc.status != IBV_WC_SUCCESS) { | |
1395 | fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n", | |
1396 | wc.status, ibv_wc_status_str(wc.status)); | |
1397 | fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]); | |
1398 | ||
1399 | return -1; | |
1400 | } | |
1401 | ||
1402 | if (rdma->control_ready_expected && | |
1403 | (wr_id >= RDMA_WRID_RECV_CONTROL)) { | |
733252de | 1404 | trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL], |
2da776db MH |
1405 | wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent); |
1406 | rdma->control_ready_expected = 0; | |
1407 | } | |
1408 | ||
1409 | if (wr_id == RDMA_WRID_RDMA_WRITE) { | |
1410 | uint64_t chunk = | |
1411 | (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT; | |
1412 | uint64_t index = | |
1413 | (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT; | |
1414 | RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); | |
1415 | ||
733252de | 1416 | trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent, |
fbce8c25 SW |
1417 | index, chunk, block->local_host_addr, |
1418 | (void *)(uintptr_t)block->remote_host_addr); | |
2da776db MH |
1419 | |
1420 | clear_bit(chunk, block->transit_bitmap); | |
1421 | ||
1422 | if (rdma->nb_sent > 0) { | |
1423 | rdma->nb_sent--; | |
1424 | } | |
1425 | ||
1426 | if (!rdma->pin_all) { | |
1427 | /* | |
1428 | * FYI: If one wanted to signal a specific chunk to be unregistered | |
1429 | * using LRU or workload-specific information, this is the function | |
1430 | * you would call to do so. That chunk would then get asynchronously | |
1431 | * unregistered later. | |
1432 | */ | |
1433 | #ifdef RDMA_UNREGISTRATION_EXAMPLE | |
1434 | qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id); | |
1435 | #endif | |
1436 | } | |
1437 | } else { | |
733252de | 1438 | trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent); |
2da776db MH |
1439 | } |
1440 | ||
1441 | *wr_id_out = wc.wr_id; | |
88571882 IY |
1442 | if (byte_len) { |
1443 | *byte_len = wc.byte_len; | |
1444 | } | |
2da776db MH |
1445 | |
1446 | return 0; | |
1447 | } | |
1448 | ||
1449 | /* | |
1450 | * Block until the next work request has completed. | |
1451 | * | |
1452 | * First poll to see if a work request has already completed, | |
1453 | * otherwise block. | |
1454 | * | |
1455 | * If we encounter completed work requests for IDs other than | |
1456 | * the one we're interested in, then that's generally an error. | |
1457 | * | |
1458 | * The only exception is actual RDMA Write completions. These | |
1459 | * completions only need to be recorded, but do not actually | |
1460 | * need further processing. | |
1461 | */ | |
88571882 IY |
1462 | static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, |
1463 | uint32_t *byte_len) | |
2da776db MH |
1464 | { |
1465 | int num_cq_events = 0, ret = 0; | |
1466 | struct ibv_cq *cq; | |
1467 | void *cq_ctx; | |
1468 | uint64_t wr_id = RDMA_WRID_NONE, wr_id_in; | |
1469 | ||
1470 | if (ibv_req_notify_cq(rdma->cq, 0)) { | |
1471 | return -1; | |
1472 | } | |
1473 | /* poll cq first */ | |
1474 | while (wr_id != wrid_requested) { | |
88571882 | 1475 | ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); |
2da776db MH |
1476 | if (ret < 0) { |
1477 | return ret; | |
1478 | } | |
1479 | ||
1480 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
1481 | ||
1482 | if (wr_id == RDMA_WRID_NONE) { | |
1483 | break; | |
1484 | } | |
1485 | if (wr_id != wrid_requested) { | |
733252de DDAG |
1486 | trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested), |
1487 | wrid_requested, print_wrid(wr_id), wr_id); | |
2da776db MH |
1488 | } |
1489 | } | |
1490 | ||
1491 | if (wr_id == wrid_requested) { | |
1492 | return 0; | |
1493 | } | |
1494 | ||
1495 | while (1) { | |
1496 | /* | |
1497 | * Coroutine doesn't start until process_incoming_migration() | |
1498 | * so don't yield unless we know we're running inside a coroutine. | |
1499 | */ | |
1500 | if (rdma->migration_started_on_destination) { | |
1501 | yield_until_fd_readable(rdma->comp_channel->fd); | |
1502 | } | |
1503 | ||
1504 | if (ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx)) { | |
1505 | perror("ibv_get_cq_event"); | |
1506 | goto err_block_for_wrid; | |
1507 | } | |
1508 | ||
1509 | num_cq_events++; | |
1510 | ||
1511 | if (ibv_req_notify_cq(cq, 0)) { | |
1512 | goto err_block_for_wrid; | |
1513 | } | |
1514 | ||
1515 | while (wr_id != wrid_requested) { | |
88571882 | 1516 | ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); |
2da776db MH |
1517 | if (ret < 0) { |
1518 | goto err_block_for_wrid; | |
1519 | } | |
1520 | ||
1521 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
1522 | ||
1523 | if (wr_id == RDMA_WRID_NONE) { | |
1524 | break; | |
1525 | } | |
1526 | if (wr_id != wrid_requested) { | |
733252de DDAG |
1527 | trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested), |
1528 | wrid_requested, print_wrid(wr_id), wr_id); | |
2da776db MH |
1529 | } |
1530 | } | |
1531 | ||
1532 | if (wr_id == wrid_requested) { | |
1533 | goto success_block_for_wrid; | |
1534 | } | |
1535 | } | |
1536 | ||
1537 | success_block_for_wrid: | |
1538 | if (num_cq_events) { | |
1539 | ibv_ack_cq_events(cq, num_cq_events); | |
1540 | } | |
1541 | return 0; | |
1542 | ||
1543 | err_block_for_wrid: | |
1544 | if (num_cq_events) { | |
1545 | ibv_ack_cq_events(cq, num_cq_events); | |
1546 | } | |
1547 | return ret; | |
1548 | } | |
1549 | ||
1550 | /* | |
1551 | * Post a SEND message work request for the control channel | |
1552 | * containing some data and block until the post completes. | |
1553 | */ | |
1554 | static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf, | |
1555 | RDMAControlHeader *head) | |
1556 | { | |
1557 | int ret = 0; | |
1f22364b | 1558 | RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL]; |
2da776db MH |
1559 | struct ibv_send_wr *bad_wr; |
1560 | struct ibv_sge sge = { | |
fbce8c25 | 1561 | .addr = (uintptr_t)(wr->control), |
2da776db MH |
1562 | .length = head->len + sizeof(RDMAControlHeader), |
1563 | .lkey = wr->control_mr->lkey, | |
1564 | }; | |
1565 | struct ibv_send_wr send_wr = { | |
1566 | .wr_id = RDMA_WRID_SEND_CONTROL, | |
1567 | .opcode = IBV_WR_SEND, | |
1568 | .send_flags = IBV_SEND_SIGNALED, | |
1569 | .sg_list = &sge, | |
1570 | .num_sge = 1, | |
1571 | }; | |
1572 | ||
733252de | 1573 | trace_qemu_rdma_post_send_control(control_desc[head->type]); |
2da776db MH |
1574 | |
1575 | /* | |
1576 | * We don't actually need to do a memcpy() in here if we used | |
1577 | * the "sge" properly, but since we're only sending control messages | |
1578 | * (not RAM in a performance-critical path), it's OK for now. | |
1579 | * | |
1580 | * The copy makes the RDMAControlHeader simpler to manipulate | |
1581 | * for the time being. | |
1582 | */ | |
6f1484ed | 1583 | assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head)); |
2da776db MH |
1584 | memcpy(wr->control, head, sizeof(RDMAControlHeader)); |
1585 | control_to_network((void *) wr->control); | |
1586 | ||
1587 | if (buf) { | |
1588 | memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len); | |
1589 | } | |
1590 | ||
1591 | ||
e325b49a | 1592 | ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); |
2da776db | 1593 | |
e325b49a | 1594 | if (ret > 0) { |
733252de | 1595 | error_report("Failed to post IB SEND for control"); |
e325b49a | 1596 | return -ret; |
2da776db MH |
1597 | } |
1598 | ||
88571882 | 1599 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL); |
2da776db | 1600 | if (ret < 0) { |
733252de | 1601 | error_report("rdma migration: send polling control error"); |
2da776db MH |
1602 | } |
1603 | ||
1604 | return ret; | |
1605 | } | |
1606 | ||
1607 | /* | |
1608 | * Post a RECV work request in anticipation of some future receipt | |
1609 | * of data on the control channel. | |
1610 | */ | |
1611 | static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx) | |
1612 | { | |
1613 | struct ibv_recv_wr *bad_wr; | |
1614 | struct ibv_sge sge = { | |
fbce8c25 | 1615 | .addr = (uintptr_t)(rdma->wr_data[idx].control), |
2da776db MH |
1616 | .length = RDMA_CONTROL_MAX_BUFFER, |
1617 | .lkey = rdma->wr_data[idx].control_mr->lkey, | |
1618 | }; | |
1619 | ||
1620 | struct ibv_recv_wr recv_wr = { | |
1621 | .wr_id = RDMA_WRID_RECV_CONTROL + idx, | |
1622 | .sg_list = &sge, | |
1623 | .num_sge = 1, | |
1624 | }; | |
1625 | ||
1626 | ||
1627 | if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) { | |
1628 | return -1; | |
1629 | } | |
1630 | ||
1631 | return 0; | |
1632 | } | |
1633 | ||
1634 | /* | |
1635 | * Block and wait for a RECV control channel message to arrive. | |
1636 | */ | |
1637 | static int qemu_rdma_exchange_get_response(RDMAContext *rdma, | |
1638 | RDMAControlHeader *head, int expecting, int idx) | |
1639 | { | |
88571882 IY |
1640 | uint32_t byte_len; |
1641 | int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx, | |
1642 | &byte_len); | |
2da776db MH |
1643 | |
1644 | if (ret < 0) { | |
733252de | 1645 | error_report("rdma migration: recv polling control error!"); |
2da776db MH |
1646 | return ret; |
1647 | } | |
1648 | ||
1649 | network_to_control((void *) rdma->wr_data[idx].control); | |
1650 | memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader)); | |
1651 | ||
733252de | 1652 | trace_qemu_rdma_exchange_get_response_start(control_desc[expecting]); |
2da776db MH |
1653 | |
1654 | if (expecting == RDMA_CONTROL_NONE) { | |
733252de DDAG |
1655 | trace_qemu_rdma_exchange_get_response_none(control_desc[head->type], |
1656 | head->type); | |
2da776db | 1657 | } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) { |
733252de DDAG |
1658 | error_report("Was expecting a %s (%d) control message" |
1659 | ", but got: %s (%d), length: %d", | |
2da776db MH |
1660 | control_desc[expecting], expecting, |
1661 | control_desc[head->type], head->type, head->len); | |
1662 | return -EIO; | |
1663 | } | |
6f1484ed | 1664 | if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) { |
81b07353 | 1665 | error_report("length too long: %d", head->len); |
6f1484ed IY |
1666 | return -EINVAL; |
1667 | } | |
88571882 | 1668 | if (sizeof(*head) + head->len != byte_len) { |
733252de | 1669 | error_report("Malformed length: %d byte_len %d", head->len, byte_len); |
88571882 IY |
1670 | return -EINVAL; |
1671 | } | |
2da776db MH |
1672 | |
1673 | return 0; | |
1674 | } | |
1675 | ||
1676 | /* | |
1677 | * When a RECV work request has completed, the work request's | |
1678 | * buffer is pointed at the header. | |
1679 | * | |
1680 | * This advances the pointer past the header to the data | |
1681 | * portion of the control message in the work request's buffer, | |
1682 | * which was populated after the work request finished. | |
1683 | */ | |
1684 | static void qemu_rdma_move_header(RDMAContext *rdma, int idx, | |
1685 | RDMAControlHeader *head) | |
1686 | { | |
1687 | rdma->wr_data[idx].control_len = head->len; | |
1688 | rdma->wr_data[idx].control_curr = | |
1689 | rdma->wr_data[idx].control + sizeof(RDMAControlHeader); | |
1690 | } | |
1691 | ||
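/*
 * Sketch of the control buffer layout the helper above maintains:
 *
 *   wr_data[idx].control:
 *   +-------------------+-----------------------------+
 *   | RDMAControlHeader | payload (head->len bytes)   |
 *   +-------------------+-----------------------------+
 *                       ^
 *                       control_curr points here after the move;
 *                       control_len counts only the payload.
 */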
1692 | /* | |
1693 | * This is an 'atomic' high-level operation to deliver a single, unified | |
1694 | * control-channel message. | |
1695 | * | |
1696 | * Additionally, if the user is expecting some kind of reply to this message, | |
1697 | * they can request a 'resp' response message be filled in by posting an | |
1698 | * additional work request on behalf of the user and waiting for an additional | |
1699 | * completion. | |
1700 | * | |
1701 | * The extra (optional) response is used during registration to save us from | |
1702 | * having to perform an *additional* exchange of messages just to provide a | |
1703 | * response, by instead piggy-backing it on the acknowledgement. | |
1704 | */ | |
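/*
 * A minimal caller sketch (mirroring qemu_rdma_write_one() below):
 * request a chunk registration and pick up the rkey piggy-backed on
 * the acknowledgement rather than paying for a second exchange:
 *
 *   RDMARegister reg = { .current_index = index, .chunks = chunks };
 *   RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
 *   RDMAControlHeader head = { .len = sizeof(RDMARegister),
 *                              .type = RDMA_CONTROL_REGISTER_REQUEST,
 *                              .repeat = 1 };
 *   int reg_result_idx;
 *
 *   ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
 *                                 &resp, &reg_result_idx, NULL);
 *
 * On success, rdma->wr_data[reg_result_idx].control_curr points at the
 * RDMARegisterResult filled in by the destination.
 */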
1705 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, | |
1706 | uint8_t *data, RDMAControlHeader *resp, | |
1707 | int *resp_idx, | |
1708 | int (*callback)(RDMAContext *rdma)) | |
1709 | { | |
1710 | int ret = 0; | |
1711 | ||
1712 | /* | |
1713 | * Wait until the dest is ready before attempting to deliver the message | |
1714 | * by waiting for a READY message. | |
1715 | */ | |
1716 | if (rdma->control_ready_expected) { | |
1717 | RDMAControlHeader resp; | |
1718 | ret = qemu_rdma_exchange_get_response(rdma, | |
1719 | &resp, RDMA_CONTROL_READY, RDMA_WRID_READY); | |
1720 | if (ret < 0) { | |
1721 | return ret; | |
1722 | } | |
1723 | } | |
1724 | ||
1725 | /* | |
1726 | * If the user is expecting a response, post a WR in anticipation of it. | |
1727 | */ | |
1728 | if (resp) { | |
1729 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA); | |
1730 | if (ret) { | |
733252de | 1731 | error_report("rdma migration: error posting" |
2da776db MH |
1732 | " extra control recv for anticipated result!"); |
1733 | return ret; | |
1734 | } | |
1735 | } | |
1736 | ||
1737 | /* | |
1738 | * Post a WR to replace the one we just consumed for the READY message. | |
1739 | */ | |
1740 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); | |
1741 | if (ret) { | |
733252de | 1742 | error_report("rdma migration: error posting first control recv!"); |
2da776db MH |
1743 | return ret; |
1744 | } | |
1745 | ||
1746 | /* | |
1747 | * Deliver the control message that was requested. | |
1748 | */ | |
1749 | ret = qemu_rdma_post_send_control(rdma, data, head); | |
1750 | ||
1751 | if (ret < 0) { | |
733252de | 1752 | error_report("Failed to send control buffer!"); |
2da776db MH |
1753 | return ret; |
1754 | } | |
1755 | ||
1756 | /* | |
1757 | * If we're expecting a response, block and wait for it. | |
1758 | */ | |
1759 | if (resp) { | |
1760 | if (callback) { | |
733252de | 1761 | trace_qemu_rdma_exchange_send_issue_callback(); |
2da776db MH |
1762 | ret = callback(rdma); |
1763 | if (ret < 0) { | |
1764 | return ret; | |
1765 | } | |
1766 | } | |
1767 | ||
733252de | 1768 | trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]); |
2da776db MH |
1769 | ret = qemu_rdma_exchange_get_response(rdma, resp, |
1770 | resp->type, RDMA_WRID_DATA); | |
1771 | ||
1772 | if (ret < 0) { | |
1773 | return ret; | |
1774 | } | |
1775 | ||
1776 | qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp); | |
1777 | if (resp_idx) { | |
1778 | *resp_idx = RDMA_WRID_DATA; | |
1779 | } | |
733252de | 1780 | trace_qemu_rdma_exchange_send_received(control_desc[resp->type]); |
2da776db MH |
1781 | } |
1782 | ||
1783 | rdma->control_ready_expected = 1; | |
1784 | ||
1785 | return 0; | |
1786 | } | |
1787 | ||
1788 | /* | |
1789 | * This is an 'atomic' high-level operation to receive a single, unified | |
1790 | * control-channel message. | |
1791 | */ | |
1792 | static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head, | |
1793 | int expecting) | |
1794 | { | |
1795 | RDMAControlHeader ready = { | |
1796 | .len = 0, | |
1797 | .type = RDMA_CONTROL_READY, | |
1798 | .repeat = 1, | |
1799 | }; | |
1800 | int ret; | |
1801 | ||
1802 | /* | |
1803 | * Inform the source that we're ready to receive a message. | |
1804 | */ | |
1805 | ret = qemu_rdma_post_send_control(rdma, NULL, &ready); | |
1806 | ||
1807 | if (ret < 0) { | |
733252de | 1808 | error_report("Failed to send control buffer!"); |
2da776db MH |
1809 | return ret; |
1810 | } | |
1811 | ||
1812 | /* | |
1813 | * Block and wait for the message. | |
1814 | */ | |
1815 | ret = qemu_rdma_exchange_get_response(rdma, head, | |
1816 | expecting, RDMA_WRID_READY); | |
1817 | ||
1818 | if (ret < 0) { | |
1819 | return ret; | |
1820 | } | |
1821 | ||
1822 | qemu_rdma_move_header(rdma, RDMA_WRID_READY, head); | |
1823 | ||
1824 | /* | |
1825 | * Post a new RECV work request to replace the one we just consumed. | |
1826 | */ | |
1827 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); | |
1828 | if (ret) { | |
733252de | 1829 | error_report("rdma migration: error posting second control recv!"); |
2da776db MH |
1830 | return ret; |
1831 | } | |
1832 | ||
1833 | return 0; | |
1834 | } | |
1835 | ||
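/*
 * A sketch of the handshake the two exchange functions implement
 * (message sender on the left, receiver on the right):
 *
 *   qemu_rdma_exchange_send()        qemu_rdma_exchange_recv()
 *     wait for READY        <-----     SEND READY
 *     SEND message          ----->     block for message
 *     [block for a piggy-backed response, if one was requested]
 *
 * The channel is therefore flow-controlled: a SEND is never posted
 * until the peer has advertised a posted RECV by sending READY.
 */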
1836 | /* | |
1837 | * Write an actual chunk of memory using RDMA. | |
1838 | * | |
1839 | * If we're using dynamic registration on the dest-side, we have to | |
1840 | * send a registration command first. | |
1841 | */ | |
1842 | static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma, | |
1843 | int current_index, uint64_t current_addr, | |
1844 | uint64_t length) | |
1845 | { | |
1846 | struct ibv_sge sge; | |
1847 | struct ibv_send_wr send_wr = { 0 }; | |
1848 | struct ibv_send_wr *bad_wr; | |
1849 | int reg_result_idx, ret, count = 0; | |
1850 | uint64_t chunk, chunks; | |
1851 | uint8_t *chunk_start, *chunk_end; | |
1852 | RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); | |
1853 | RDMARegister reg; | |
1854 | RDMARegisterResult *reg_result; | |
1855 | RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT }; | |
1856 | RDMAControlHeader head = { .len = sizeof(RDMARegister), | |
1857 | .type = RDMA_CONTROL_REGISTER_REQUEST, | |
1858 | .repeat = 1, | |
1859 | }; | |
1860 | ||
1861 | retry: | |
fbce8c25 | 1862 | sge.addr = (uintptr_t)(block->local_host_addr + |
2da776db MH |
1863 | (current_addr - block->offset)); |
1864 | sge.length = length; | |
1865 | ||
fbce8c25 SW |
1866 | chunk = ram_chunk_index(block->local_host_addr, |
1867 | (uint8_t *)(uintptr_t)sge.addr); | |
2da776db MH |
1868 | chunk_start = ram_chunk_start(block, chunk); |
1869 | ||
1870 | if (block->is_ram_block) { | |
1871 | chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); | |
1872 | ||
1873 | if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { | |
1874 | chunks--; | |
1875 | } | |
1876 | } else { | |
1877 | chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); | |
1878 | ||
1879 | if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { | |
1880 | chunks--; | |
1881 | } | |
1882 | } | |
1883 | ||
733252de DDAG |
1884 | trace_qemu_rdma_write_one_top(chunks + 1, |
1885 | (chunks + 1) * | |
1886 | (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); | |
2da776db MH |
1887 | |
1888 | chunk_end = ram_chunk_end(block, chunk + chunks); | |
1889 | ||
1890 | if (!rdma->pin_all) { | |
1891 | #ifdef RDMA_UNREGISTRATION_EXAMPLE | |
1892 | qemu_rdma_unregister_waiting(rdma); | |
1893 | #endif | |
1894 | } | |
1895 | ||
1896 | while (test_bit(chunk, block->transit_bitmap)) { | |
1897 | (void)count; | |
733252de | 1898 | trace_qemu_rdma_write_one_block(count++, current_index, chunk, |
2da776db MH |
1899 | sge.addr, length, rdma->nb_sent, block->nb_chunks); |
1900 | ||
88571882 | 1901 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
2da776db MH |
1902 | |
1903 | if (ret < 0) { | |
733252de | 1904 | error_report("Failed to wait for previous write to complete " |
2da776db | 1905 | "block %d chunk %" PRIu64 |
733252de | 1906 | " current %" PRIu64 " len %" PRIu64 " %d", |
2da776db MH |
1907 | current_index, chunk, sge.addr, length, rdma->nb_sent); |
1908 | return ret; | |
1909 | } | |
1910 | } | |
1911 | ||
1912 | if (!rdma->pin_all || !block->is_ram_block) { | |
1913 | if (!block->remote_keys[chunk]) { | |
1914 | /* | |
1915 | * This chunk has not yet been registered, so first check to see | |
1916 | * if the entire chunk is zero. If so, tell the other side to | |
1917 | * memset() + madvise() the entire chunk without RDMA. | |
1918 | */ | |
1919 | ||
fbce8c25 SW |
1920 | if (can_use_buffer_find_nonzero_offset((void *)(uintptr_t)sge.addr, |
1921 | length) | |
1922 | && buffer_find_nonzero_offset((void *)(uintptr_t)sge.addr, | |
2da776db MH |
1923 | length) == length) { |
1924 | RDMACompress comp = { | |
1925 | .offset = current_addr, | |
1926 | .value = 0, | |
1927 | .block_idx = current_index, | |
1928 | .length = length, | |
1929 | }; | |
1930 | ||
1931 | head.len = sizeof(comp); | |
1932 | head.type = RDMA_CONTROL_COMPRESS; | |
1933 | ||
733252de DDAG |
1934 | trace_qemu_rdma_write_one_zero(chunk, sge.length, |
1935 | current_index, current_addr); | |
2da776db | 1936 | |
b12f7777 | 1937 | compress_to_network(rdma, &comp); |
2da776db MH |
1938 | ret = qemu_rdma_exchange_send(rdma, &head, |
1939 | (uint8_t *) &comp, NULL, NULL, NULL); | |
1940 | ||
1941 | if (ret < 0) { | |
1942 | return -EIO; | |
1943 | } | |
1944 | ||
1945 | acct_update_position(f, sge.length, true); | |
1946 | ||
1947 | return 1; | |
1948 | } | |
1949 | ||
1950 | /* | |
1951 | * Otherwise, tell other side to register. | |
1952 | */ | |
1953 | reg.current_index = current_index; | |
1954 | if (block->is_ram_block) { | |
1955 | reg.key.current_addr = current_addr; | |
1956 | } else { | |
1957 | reg.key.chunk = chunk; | |
1958 | } | |
1959 | reg.chunks = chunks; | |
1960 | ||
733252de DDAG |
1961 | trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index, |
1962 | current_addr); | |
2da776db | 1963 | |
b12f7777 | 1964 | register_to_network(rdma, ®); |
2da776db MH |
1965 | ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, |
1966 | &resp, ®_result_idx, NULL); | |
1967 | if (ret < 0) { | |
1968 | return ret; | |
1969 | } | |
1970 | ||
1971 | /* try to overlap this single registration with the one we sent. */ | |
3ac040c0 | 1972 | if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr, |
2da776db MH |
1973 | &sge.lkey, NULL, chunk, |
1974 | chunk_start, chunk_end)) { | |
733252de | 1975 | error_report("cannot get lkey"); |
2da776db MH |
1976 | return -EINVAL; |
1977 | } | |
1978 | ||
1979 | reg_result = (RDMARegisterResult *) | |
1980 | rdma->wr_data[reg_result_idx].control_curr; | |
1981 | ||
1982 | network_to_result(reg_result); | |
1983 | ||
733252de DDAG |
1984 | trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk], |
1985 | reg_result->rkey, chunk); | |
2da776db MH |
1986 | |
1987 | block->remote_keys[chunk] = reg_result->rkey; | |
1988 | block->remote_host_addr = reg_result->host_addr; | |
1989 | } else { | |
1990 | /* already registered before */ | |
3ac040c0 | 1991 | if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr, |
2da776db MH |
1992 | &sge.lkey, NULL, chunk, |
1993 | chunk_start, chunk_end)) { | |
733252de | 1994 | error_report("cannot get lkey!"); |
2da776db MH |
1995 | return -EINVAL; |
1996 | } | |
1997 | } | |
1998 | ||
1999 | send_wr.wr.rdma.rkey = block->remote_keys[chunk]; | |
2000 | } else { | |
2001 | send_wr.wr.rdma.rkey = block->remote_rkey; | |
2002 | ||
3ac040c0 | 2003 | if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr, |
2da776db MH |
2004 | &sge.lkey, NULL, chunk, |
2005 | chunk_start, chunk_end)) { | |
733252de | 2006 | error_report("cannot get lkey!"); |
2da776db MH |
2007 | return -EINVAL; |
2008 | } | |
2009 | } | |
2010 | ||
2011 | /* | |
2012 | * Encode the ram block index and chunk within this wrid. | |
2013 | * We will use this information at the time of completion | |
2014 | * to figure out which bitmap to check against and then which | |
2015 | * chunk in the bitmap to look for. | |
2016 | */ | |
2017 | send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, | |
2018 | current_index, chunk); | |
2019 | ||
2020 | send_wr.opcode = IBV_WR_RDMA_WRITE; | |
2021 | send_wr.send_flags = IBV_SEND_SIGNALED; | |
2022 | send_wr.sg_list = &sge; | |
2023 | send_wr.num_sge = 1; | |
2024 | send_wr.wr.rdma.remote_addr = block->remote_host_addr + | |
2025 | (current_addr - block->offset); | |
2026 | ||
733252de DDAG |
2027 | trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr, |
2028 | sge.length); | |
2da776db MH |
2029 | |
2030 | /* | |
2031 | * ibv_post_send() does not return negative error numbers; | |
2032 | * per the specification they are positive - no idea why. | |
2033 | */ | |
2034 | ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); | |
2035 | ||
2036 | if (ret == ENOMEM) { | |
733252de | 2037 | trace_qemu_rdma_write_one_queue_full(); |
88571882 | 2038 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
2da776db | 2039 | if (ret < 0) { |
733252de DDAG |
2040 | error_report("rdma migration: failed to make " |
2041 | "room in full send queue! %d", ret); | |
2da776db MH |
2042 | return ret; |
2043 | } | |
2044 | ||
2045 | goto retry; | |
2046 | ||
2047 | } else if (ret > 0) { | |
2048 | perror("rdma migration: post rdma write failed"); | |
2049 | return -ret; | |
2050 | } | |
2051 | ||
2052 | set_bit(chunk, block->transit_bitmap); | |
2053 | acct_update_position(f, sge.length, false); | |
2054 | rdma->total_writes++; | |
2055 | ||
2056 | return 0; | |
2057 | } | |
2058 | ||
2059 | /* | |
2060 | * Push out any unwritten RDMA operations. | |
2061 | * | |
2062 | * We support sending out multiple chunks at the same time. | |
2063 | * Not all of them need to get signaled in the completion queue. | |
2064 | */ | |
2065 | static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma) | |
2066 | { | |
2067 | int ret; | |
2068 | ||
2069 | if (!rdma->current_length) { | |
2070 | return 0; | |
2071 | } | |
2072 | ||
2073 | ret = qemu_rdma_write_one(f, rdma, | |
2074 | rdma->current_index, rdma->current_addr, rdma->current_length); | |
2075 | ||
2076 | if (ret < 0) { | |
2077 | return ret; | |
2078 | } | |
2079 | ||
2080 | if (ret == 0) { | |
2081 | rdma->nb_sent++; | |
733252de | 2082 | trace_qemu_rdma_write_flush(rdma->nb_sent); |
2da776db MH |
2083 | } |
2084 | ||
2085 | rdma->current_length = 0; | |
2086 | rdma->current_addr = 0; | |
2087 | ||
2088 | return 0; | |
2089 | } | |
2090 | ||
2091 | static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma, | |
2092 | uint64_t offset, uint64_t len) | |
2093 | { | |
44b59494 IY |
2094 | RDMALocalBlock *block; |
2095 | uint8_t *host_addr; | |
2096 | uint8_t *chunk_end; | |
2097 | ||
2098 | if (rdma->current_index < 0) { | |
2099 | return 0; | |
2100 | } | |
2101 | ||
2102 | if (rdma->current_chunk < 0) { | |
2103 | return 0; | |
2104 | } | |
2105 | ||
2106 | block = &(rdma->local_ram_blocks.block[rdma->current_index]); | |
2107 | host_addr = block->local_host_addr + (offset - block->offset); | |
2108 | chunk_end = ram_chunk_end(block, rdma->current_chunk); | |
2da776db MH |
2109 | |
2110 | if (rdma->current_length == 0) { | |
2111 | return 0; | |
2112 | } | |
2113 | ||
2114 | /* | |
2115 | * Only merge into chunk sequentially. | |
2116 | */ | |
2117 | if (offset != (rdma->current_addr + rdma->current_length)) { | |
2118 | return 0; | |
2119 | } | |
2120 | ||
2da776db MH |
2121 | if (offset < block->offset) { |
2122 | return 0; | |
2123 | } | |
2124 | ||
2125 | if ((offset + len) > (block->offset + block->length)) { | |
2126 | return 0; | |
2127 | } | |
2128 | ||
2da776db MH |
2129 | if ((host_addr + len) > chunk_end) { |
2130 | return 0; | |
2131 | } | |
2132 | ||
2133 | return 1; | |
2134 | } | |
2135 | ||
2136 | /* | |
2137 | * We're not actually writing here, but doing three things: | |
2138 | * | |
2139 | * 1. Identify the chunk the buffer belongs to. | |
2140 | * 2. If the chunk is full or the buffer doesn't belong to the current | |
2141 | * chunk, then start a new chunk and flush() the old chunk. | |
2142 | * 3. To keep the hardware busy, we also group chunks into batches | |
2143 | * and only require that a batch gets acknowledged in the completion | |
2144 | * queue instead of each individual chunk. | |
2145 | */ | |
2146 | static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma, | |
2147 | uint64_t block_offset, uint64_t offset, | |
2148 | uint64_t len) | |
2149 | { | |
2150 | uint64_t current_addr = block_offset + offset; | |
2151 | uint64_t index = rdma->current_index; | |
2152 | uint64_t chunk = rdma->current_chunk; | |
2153 | int ret; | |
2154 | ||
2155 | /* If we cannot merge it, we flush the current buffer first. */ | |
2156 | if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) { | |
2157 | ret = qemu_rdma_write_flush(f, rdma); | |
2158 | if (ret) { | |
2159 | return ret; | |
2160 | } | |
2161 | rdma->current_length = 0; | |
2162 | rdma->current_addr = current_addr; | |
2163 | ||
2164 | ret = qemu_rdma_search_ram_block(rdma, block_offset, | |
2165 | offset, len, &index, &chunk); | |
2166 | if (ret) { | |
733252de | 2167 | error_report("ram block search failed"); |
2da776db MH |
2168 | return ret; |
2169 | } | |
2170 | rdma->current_index = index; | |
2171 | rdma->current_chunk = chunk; | |
2172 | } | |
2173 | ||
2174 | /* merge it */ | |
2175 | rdma->current_length += len; | |
2176 | ||
2177 | /* flush it if buffer is too large */ | |
2178 | if (rdma->current_length >= RDMA_MERGE_MAX) { | |
2179 | return qemu_rdma_write_flush(f, rdma); | |
2180 | } | |
2181 | ||
2182 | return 0; | |
2183 | } | |
2184 | ||
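/*
 * For example (a sketch, assuming 4KB pages and the 1MB chunk size
 * implied by RDMA_REG_CHUNK_SHIFT), two back-to-back calls
 *
 *   qemu_rdma_write(f, rdma, block_offset, 0,    4096);
 *   qemu_rdma_write(f, rdma, block_offset, 4096, 4096);
 *
 * merge into a single pending 8KB region. Nothing is posted until the
 * stream stops being contiguous, leaves the current chunk, reaches
 * RDMA_MERGE_MAX, or an explicit flush arrives, at which point
 * qemu_rdma_write_flush() issues one RDMA write for the whole region.
 */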
2185 | static void qemu_rdma_cleanup(RDMAContext *rdma) | |
2186 | { | |
2187 | struct rdma_cm_event *cm_event; | |
2188 | int ret, idx; | |
2189 | ||
5a91337c | 2190 | if (rdma->cm_id && rdma->connected) { |
2da776db MH |
2191 | if (rdma->error_state) { |
2192 | RDMAControlHeader head = { .len = 0, | |
2193 | .type = RDMA_CONTROL_ERROR, | |
2194 | .repeat = 1, | |
2195 | }; | |
733252de | 2196 | error_report("Early error. Sending error."); |
2da776db MH |
2197 | qemu_rdma_post_send_control(rdma, NULL, &head); |
2198 | } | |
2199 | ||
2200 | ret = rdma_disconnect(rdma->cm_id); | |
2201 | if (!ret) { | |
733252de | 2202 | trace_qemu_rdma_cleanup_waiting_for_disconnect(); |
2da776db MH |
2203 | ret = rdma_get_cm_event(rdma->channel, &cm_event); |
2204 | if (!ret) { | |
2205 | rdma_ack_cm_event(cm_event); | |
2206 | } | |
2207 | } | |
733252de | 2208 | trace_qemu_rdma_cleanup_disconnect(); |
5a91337c | 2209 | rdma->connected = false; |
2da776db MH |
2210 | } |
2211 | ||
a97270ad DDAG |
2212 | g_free(rdma->dest_blocks); |
2213 | rdma->dest_blocks = NULL; | |
2da776db | 2214 | |
1f22364b | 2215 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2da776db MH |
2216 | if (rdma->wr_data[idx].control_mr) { |
2217 | rdma->total_registrations--; | |
2218 | ibv_dereg_mr(rdma->wr_data[idx].control_mr); | |
2219 | } | |
2220 | rdma->wr_data[idx].control_mr = NULL; | |
2221 | } | |
2222 | ||
2223 | if (rdma->local_ram_blocks.block) { | |
2224 | while (rdma->local_ram_blocks.nb_blocks) { | |
ba795761 | 2225 | rdma_delete_block(rdma, rdma->local_ram_blocks.block->offset); |
2da776db MH |
2226 | } |
2227 | } | |
2228 | ||
80b262e1 PR |
2229 | if (rdma->qp) { |
2230 | rdma_destroy_qp(rdma->cm_id); | |
2231 | rdma->qp = NULL; | |
2232 | } | |
2da776db MH |
2233 | if (rdma->cq) { |
2234 | ibv_destroy_cq(rdma->cq); | |
2235 | rdma->cq = NULL; | |
2236 | } | |
2237 | if (rdma->comp_channel) { | |
2238 | ibv_destroy_comp_channel(rdma->comp_channel); | |
2239 | rdma->comp_channel = NULL; | |
2240 | } | |
2241 | if (rdma->pd) { | |
2242 | ibv_dealloc_pd(rdma->pd); | |
2243 | rdma->pd = NULL; | |
2244 | } | |
2da776db MH |
2245 | if (rdma->cm_id) { |
2246 | rdma_destroy_id(rdma->cm_id); | |
2247 | rdma->cm_id = NULL; | |
2248 | } | |
80b262e1 PR |
2249 | if (rdma->listen_id) { |
2250 | rdma_destroy_id(rdma->listen_id); | |
2251 | rdma->listen_id = NULL; | |
2252 | } | |
2da776db MH |
2253 | if (rdma->channel) { |
2254 | rdma_destroy_event_channel(rdma->channel); | |
2255 | rdma->channel = NULL; | |
2256 | } | |
e1d0fb37 IY |
2257 | g_free(rdma->host); |
2258 | rdma->host = NULL; | |
2da776db MH |
2259 | } |
2260 | ||
2261 | ||
2262 | static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all) | |
2263 | { | |
2264 | int ret, idx; | |
2265 | Error *local_err = NULL, **temp = &local_err; | |
2266 | ||
2267 | /* | |
2268 | * Will be validated against destination's actual capabilities | |
2269 | * after the connect() completes. | |
2270 | */ | |
2271 | rdma->pin_all = pin_all; | |
2272 | ||
2273 | ret = qemu_rdma_resolve_host(rdma, temp); | |
2274 | if (ret) { | |
2275 | goto err_rdma_source_init; | |
2276 | } | |
2277 | ||
2278 | ret = qemu_rdma_alloc_pd_cq(rdma); | |
2279 | if (ret) { | |
2280 | ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()" | |
2281 | " limits may be too low. Please check $ ulimit -a # and " | |
66988941 | 2282 | "search for 'ulimit -l' in the output"); |
2da776db MH |
2283 | goto err_rdma_source_init; |
2284 | } | |
2285 | ||
2286 | ret = qemu_rdma_alloc_qp(rdma); | |
2287 | if (ret) { | |
66988941 | 2288 | ERROR(temp, "rdma migration: error allocating qp!"); |
2da776db MH |
2289 | goto err_rdma_source_init; |
2290 | } | |
2291 | ||
2292 | ret = qemu_rdma_init_ram_blocks(rdma); | |
2293 | if (ret) { | |
66988941 | 2294 | ERROR(temp, "rdma migration: error initializing ram blocks!"); |
2da776db MH |
2295 | goto err_rdma_source_init; |
2296 | } | |
2297 | ||
1f22364b | 2298 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2da776db MH |
2299 | ret = qemu_rdma_reg_control(rdma, idx); |
2300 | if (ret) { | |
66988941 | 2301 | ERROR(temp, "rdma migration: error registering %d control!", |
2da776db MH |
2302 | idx); |
2303 | goto err_rdma_source_init; | |
2304 | } | |
2305 | } | |
2306 | ||
2307 | return 0; | |
2308 | ||
2309 | err_rdma_source_init: | |
2310 | error_propagate(errp, local_err); | |
2311 | qemu_rdma_cleanup(rdma); | |
2312 | return -1; | |
2313 | } | |
2314 | ||
2315 | static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) | |
2316 | { | |
2317 | RDMACapabilities cap = { | |
2318 | .version = RDMA_CONTROL_VERSION_CURRENT, | |
2319 | .flags = 0, | |
2320 | }; | |
2321 | struct rdma_conn_param conn_param = { .initiator_depth = 2, | |
2322 | .retry_count = 5, | |
2323 | .private_data = &cap, | |
2324 | .private_data_len = sizeof(cap), | |
2325 | }; | |
2326 | struct rdma_cm_event *cm_event; | |
2327 | int ret; | |
2328 | ||
2329 | /* | |
2330 | * Only negotiate the capability with destination if the user | |
2331 | * on the source first requested the capability. | |
2332 | */ | |
2333 | if (rdma->pin_all) { | |
733252de | 2334 | trace_qemu_rdma_connect_pin_all_requested(); |
2da776db MH |
2335 | cap.flags |= RDMA_CAPABILITY_PIN_ALL; |
2336 | } | |
2337 | ||
2338 | caps_to_network(&cap); | |
2339 | ||
2340 | ret = rdma_connect(rdma->cm_id, &conn_param); | |
2341 | if (ret) { | |
2342 | perror("rdma_connect"); | |
66988941 | 2343 | ERROR(errp, "connecting to destination!"); |
2da776db MH |
2344 | goto err_rdma_source_connect; |
2345 | } | |
2346 | ||
2347 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2348 | if (ret) { | |
2349 | perror("rdma_get_cm_event after rdma_connect"); | |
66988941 | 2350 | ERROR(errp, "connecting to destination!"); |
2da776db | 2351 | rdma_ack_cm_event(cm_event); |
2da776db MH |
2352 | goto err_rdma_source_connect; |
2353 | } | |
2354 | ||
2355 | if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { | |
2356 | perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect"); | |
66988941 | 2357 | ERROR(errp, "connecting to destination!"); |
2da776db | 2358 | rdma_ack_cm_event(cm_event); |
2da776db MH |
2359 | goto err_rdma_source_connect; |
2360 | } | |
5a91337c | 2361 | rdma->connected = true; |
2da776db MH |
2362 | |
2363 | memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); | |
2364 | network_to_caps(&cap); | |
2365 | ||
2366 | /* | |
2367 | * Verify that the *requested* capabilities are supported by the destination | |
2368 | * and disable them otherwise. | |
2369 | */ | |
2370 | if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) { | |
2371 | ERROR(errp, "Server cannot support pinning all memory. " | |
66988941 | 2372 | "Will register memory dynamically."); |
2da776db MH |
2373 | rdma->pin_all = false; |
2374 | } | |
2375 | ||
733252de | 2376 | trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all); |
2da776db MH |
2377 | |
2378 | rdma_ack_cm_event(cm_event); | |
2379 | ||
87772639 | 2380 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); |
2da776db | 2381 | if (ret) { |
66988941 | 2382 | ERROR(errp, "posting second control recv!"); |
2da776db MH |
2383 | goto err_rdma_source_connect; |
2384 | } | |
2385 | ||
2386 | rdma->control_ready_expected = 1; | |
2387 | rdma->nb_sent = 0; | |
2388 | return 0; | |
2389 | ||
2390 | err_rdma_source_connect: | |
2391 | qemu_rdma_cleanup(rdma); | |
2392 | return -1; | |
2393 | } | |
2394 | ||
2395 | static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) | |
2396 | { | |
1dbd2fd9 | 2397 | int ret, idx; |
2da776db MH |
2398 | struct rdma_cm_id *listen_id; |
2399 | char ip[40] = "unknown"; | |
1dbd2fd9 | 2400 | struct rdma_addrinfo *res, *e; |
b58c8552 | 2401 | char port_str[16]; |
2da776db | 2402 | |
1f22364b | 2403 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2da776db MH |
2404 | rdma->wr_data[idx].control_len = 0; |
2405 | rdma->wr_data[idx].control_curr = NULL; | |
2406 | } | |
2407 | ||
1dbd2fd9 | 2408 | if (!rdma->host || !rdma->host[0]) { |
66988941 | 2409 | ERROR(errp, "RDMA host is not set!"); |
2da776db MH |
2410 | rdma->error_state = -EINVAL; |
2411 | return -1; | |
2412 | } | |
2413 | /* create CM channel */ | |
2414 | rdma->channel = rdma_create_event_channel(); | |
2415 | if (!rdma->channel) { | |
66988941 | 2416 | ERROR(errp, "could not create rdma event channel"); |
2da776db MH |
2417 | rdma->error_state = -EINVAL; |
2418 | return -1; | |
2419 | } | |
2420 | ||
2421 | /* create CM id */ | |
2422 | ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP); | |
2423 | if (ret) { | |
66988941 | 2424 | ERROR(errp, "could not create cm_id!"); |
2da776db MH |
2425 | goto err_dest_init_create_listen_id; |
2426 | } | |
2427 | ||
b58c8552 MH |
2428 | snprintf(port_str, 16, "%d", rdma->port); |
2429 | port_str[15] = '\0'; | |
2da776db | 2430 | |
1dbd2fd9 MT |
2431 | ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res); |
2432 | if (ret < 0) { | |
2433 | ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host); | |
2434 | goto err_dest_init_bind_addr; | |
2435 | } | |
6470215b | 2436 | |
1dbd2fd9 MT |
2437 | for (e = res; e != NULL; e = e->ai_next) { |
2438 | inet_ntop(e->ai_family, | |
2439 | &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); | |
2440 | trace_qemu_rdma_dest_init_trying(rdma->host, ip); | |
2441 | ret = rdma_bind_addr(listen_id, e->ai_dst_addr); | |
2442 | if (ret) { | |
2443 | continue; | |
2da776db | 2444 | } |
1dbd2fd9 MT |
2445 | if (e->ai_family == AF_INET6) { |
2446 | ret = qemu_rdma_broken_ipv6_kernel(errp, listen_id->verbs); | |
2447 | if (ret) { | |
2448 | continue; | |
6470215b MH |
2449 | } |
2450 | } | |
1dbd2fd9 MT |
2451 | break; |
2452 | } | |
b58c8552 | 2453 | |
1dbd2fd9 | 2454 | if (!e) { |
6470215b MH |
2455 | ERROR(errp, "could not rdma_bind_addr!"); | |
2456 | goto err_dest_init_bind_addr; | |
2da776db | 2457 | } |
2da776db MH |
2458 | |
2459 | rdma->listen_id = listen_id; | |
2460 | qemu_rdma_dump_gid("dest_init", listen_id); | |
2461 | return 0; | |
2462 | ||
2463 | err_dest_init_bind_addr: | |
2464 | rdma_destroy_id(listen_id); | |
2465 | err_dest_init_create_listen_id: | |
2466 | rdma_destroy_event_channel(rdma->channel); | |
2467 | rdma->channel = NULL; | |
2468 | rdma->error_state = ret; | |
2469 | return ret; | |
2470 | ||
2471 | } | |
2472 | ||
2473 | static void *qemu_rdma_data_init(const char *host_port, Error **errp) | |
2474 | { | |
2475 | RDMAContext *rdma = NULL; | |
2476 | InetSocketAddress *addr; | |
2477 | ||
2478 | if (host_port) { | |
2479 | rdma = g_malloc0(sizeof(RDMAContext)); | |
2da776db MH |
2480 | rdma->current_index = -1; |
2481 | rdma->current_chunk = -1; | |
2482 | ||
2483 | addr = inet_parse(host_port, NULL); | |
2484 | if (addr != NULL) { | |
2485 | rdma->port = atoi(addr->port); | |
2486 | rdma->host = g_strdup(addr->host); | |
2487 | } else { | |
2488 | ERROR(errp, "bad RDMA migration address '%s'", host_port); | |
2489 | g_free(rdma); | |
e325b49a | 2490 | rdma = NULL; |
2da776db | 2491 | } |
e325b49a MH |
2492 | |
2493 | qapi_free_InetSocketAddress(addr); | |
2da776db MH |
2494 | } |
2495 | ||
2496 | return rdma; | |
2497 | } | |
2498 | ||
2499 | /* | |
2500 | * QEMUFile interface to the control channel. | |
2501 | * SEND messages for control only. | |
971ae6ef | 2502 | * The VM's RAM is handled with regular RDMA messages. |
2da776db MH |
2503 | */ |
2504 | static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf, | |
2505 | int64_t pos, int size) | |
2506 | { | |
2507 | QEMUFileRDMA *r = opaque; | |
2508 | QEMUFile *f = r->file; | |
2509 | RDMAContext *rdma = r->rdma; | |
2510 | size_t remaining = size; | |
2511 | uint8_t *data = (void *) buf; | |
2512 | int ret; | |
2513 | ||
2514 | CHECK_ERROR_STATE(); | |
2515 | ||
2516 | /* | |
2517 | * Push out any writes that | |
971ae6ef | 2518 | * we've queued up for the VM's RAM. |
2da776db MH |
2519 | */ |
2520 | ret = qemu_rdma_write_flush(f, rdma); | |
2521 | if (ret < 0) { | |
2522 | rdma->error_state = ret; | |
2523 | return ret; | |
2524 | } | |
2525 | ||
2526 | while (remaining) { | |
2527 | RDMAControlHeader head; | |
2528 | ||
2529 | r->len = MIN(remaining, RDMA_SEND_INCREMENT); | |
2530 | remaining -= r->len; | |
2531 | ||
2532 | head.len = r->len; | |
2533 | head.type = RDMA_CONTROL_QEMU_FILE; | |
2534 | ||
2535 | ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL); | |
2536 | ||
2537 | if (ret < 0) { | |
2538 | rdma->error_state = ret; | |
2539 | return ret; | |
2540 | } | |
2541 | ||
2542 | data += r->len; | |
2543 | } | |
2544 | ||
2545 | return size; | |
2546 | } | |
2547 | ||
2548 | static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf, | |
2549 | int size, int idx) | |
2550 | { | |
2551 | size_t len = 0; | |
2552 | ||
2553 | if (rdma->wr_data[idx].control_len) { | |
733252de | 2554 | trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size); |
2da776db MH |
2555 | |
2556 | len = MIN(size, rdma->wr_data[idx].control_len); | |
2557 | memcpy(buf, rdma->wr_data[idx].control_curr, len); | |
2558 | rdma->wr_data[idx].control_curr += len; | |
2559 | rdma->wr_data[idx].control_len -= len; | |
2560 | } | |
2561 | ||
2562 | return len; | |
2563 | } | |
2564 | ||
2565 | /* | |
2566 | * QEMUFile interface to the control channel. | |
2567 | * RDMA links don't use bytestreams, so we have to | |
2568 | * return bytes to QEMUFile opportunistically. | |
2569 | */ | |
2570 | static int qemu_rdma_get_buffer(void *opaque, uint8_t *buf, | |
2571 | int64_t pos, int size) | |
2572 | { | |
2573 | QEMUFileRDMA *r = opaque; | |
2574 | RDMAContext *rdma = r->rdma; | |
2575 | RDMAControlHeader head; | |
2576 | int ret = 0; | |
2577 | ||
2578 | CHECK_ERROR_STATE(); | |
2579 | ||
2580 | /* | |
2581 | * First, we hold on to the last SEND message we | |
2582 | * were given and dish out its bytes until we | |
2583 | * run out. | |
2584 | */ | |
2585 | r->len = qemu_rdma_fill(r->rdma, buf, size, 0); | |
2586 | if (r->len) { | |
2587 | return r->len; | |
2588 | } | |
2589 | ||
2590 | /* | |
2591 | * Once we run out, we block and wait for another | |
2592 | * SEND message to arrive. | |
2593 | */ | |
2594 | ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE); | |
2595 | ||
2596 | if (ret < 0) { | |
2597 | rdma->error_state = ret; | |
2598 | return ret; | |
2599 | } | |
2600 | ||
2601 | /* | |
2602 | * SEND was received with new bytes, now try again. | |
2603 | */ | |
2604 | return qemu_rdma_fill(r->rdma, buf, size, 0); | |
2605 | } | |
2606 | ||
2607 | /* | |
2608 | * Block until all the outstanding chunks have been delivered by the hardware. | |
2609 | */ | |
2610 | static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma) | |
2611 | { | |
2612 | int ret; | |
2613 | ||
2614 | if (qemu_rdma_write_flush(f, rdma) < 0) { | |
2615 | return -EIO; | |
2616 | } | |
2617 | ||
2618 | while (rdma->nb_sent) { | |
88571882 | 2619 | ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); |
2da776db | 2620 | if (ret < 0) { |
733252de | 2621 | error_report("rdma migration: complete polling error!"); |
2da776db MH |
2622 | return -EIO; |
2623 | } | |
2624 | } | |
2625 | ||
2626 | qemu_rdma_unregister_waiting(rdma); | |
2627 | ||
2628 | return 0; | |
2629 | } | |
2630 | ||
2631 | static int qemu_rdma_close(void *opaque) | |
2632 | { | |
733252de | 2633 | trace_qemu_rdma_close(); |
2da776db MH |
2634 | QEMUFileRDMA *r = opaque; |
2635 | if (r->rdma) { | |
2636 | qemu_rdma_cleanup(r->rdma); | |
2637 | g_free(r->rdma); | |
2638 | } | |
2639 | g_free(r); | |
2640 | return 0; | |
2641 | } | |
2642 | ||
2643 | /* | |
2644 | * Parameters: | |
2645 | * @offset == 0 : | |
2646 | * This means that 'block_offset' is a full virtual address that does not | |
2647 | * belong to a RAMBlock of the virtual machine and instead | |
2648 | * represents a private malloc'd memory area that the caller wishes to | |
2649 | * transfer. | |
2650 | * | |
2651 | * @offset != 0 : | |
2652 | * Offset is an offset to be added to block_offset and used | |
2653 | * to also look up the corresponding RAMBlock. | |
2654 | * | |
2655 | * @size > 0 : | |
2656 | * Initiate a transfer of this size. | |
2657 | * | |
2658 | * @size == 0 : | |
2659 | * A 'hint' or 'advice', meaning that we wish to speculatively | |
2660 | * and asynchronously unregister this memory. In this case, there is no | |
52f35022 | 2661 | * guarantee that the unregister will actually happen, for example, |
2da776db MH |
2662 | * if the memory is being actively transmitted. Additionally, the memory |
2663 | * may be re-registered at any future time if a write within the same | |
2664 | * chunk was requested again, even if you attempted to unregister it | |
2665 | * here. | |
2666 | * | |
2667 | * @size < 0 : TODO, not yet supported | |
2668 | * Unregister the memory NOW. This means that the caller does not | |
2669 | * expect there to be any future RDMA transfers and we just want to clean | |
2670 | * things up. This is used in case the upper layer owns the memory and | |
2671 | * cannot wait for qemu_fclose() to occur. | |
2672 | * | |
2673 | * @bytes_sent : User-specified pointer to indicate how many bytes were | |
2674 | * sent. Usually, this will not be more than a few bytes of | |
2675 | * the protocol because most transfers are sent asynchronously. | |
2676 | */ | |
2677 | static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, | |
2678 | ram_addr_t block_offset, ram_addr_t offset, | |
6e1dea46 | 2679 | size_t size, uint64_t *bytes_sent) |
2da776db MH |
2680 | { |
2681 | QEMUFileRDMA *rfile = opaque; | |
2682 | RDMAContext *rdma = rfile->rdma; | |
2683 | int ret; | |
2684 | ||
2685 | CHECK_ERROR_STATE(); | |
2686 | ||
2687 | qemu_fflush(f); | |
2688 | ||
2689 | if (size > 0) { | |
2690 | /* | |
2691 | * Add this page to the current 'chunk'. If the chunk | |
2692 | * is full, or the page doesn't belong to the current chunk, | |
2693 | * an actual RDMA write will occur and a new chunk will be formed. | |
2694 | */ | |
2695 | ret = qemu_rdma_write(f, rdma, block_offset, offset, size); | |
2696 | if (ret < 0) { | |
733252de | 2697 | error_report("rdma migration: write error! %d", ret); |
2da776db MH |
2698 | goto err; |
2699 | } | |
2700 | ||
2701 | /* | |
2702 | * We always return 1 byte because the RDMA | |
2703 | * protocol is completely asynchronous. We do not yet know | |
2704 | * whether an identified chunk is zero or not because we're | |
2705 | * waiting for other pages to potentially be merged with | |
2706 | * the current chunk. So, we have to call qemu_update_position() | |
2707 | * later on when the actual write occurs. | |
2708 | */ | |
2709 | if (bytes_sent) { | |
2710 | *bytes_sent = 1; | |
2711 | } | |
2712 | } else { | |
2713 | uint64_t index, chunk; | |
2714 | ||
2715 | /* TODO: Change QEMUFileOps prototype to be signed: size_t => long | |
2716 | if (size < 0) { | |
2717 | ret = qemu_rdma_drain_cq(f, rdma); | |
2718 | if (ret < 0) { | |
2719 | fprintf(stderr, "rdma: failed to synchronously drain" | |
2720 | " completion queue before unregistration.\n"); | |
2721 | goto err; | |
2722 | } | |
2723 | } | |
2724 | */ | |
2725 | ||
2726 | ret = qemu_rdma_search_ram_block(rdma, block_offset, | |
2727 | offset, size, &index, &chunk); | |
2728 | ||
2729 | if (ret) { | |
733252de | 2730 | error_report("ram block search failed"); |
2da776db MH |
2731 | goto err; |
2732 | } | |
2733 | ||
2734 | qemu_rdma_signal_unregister(rdma, index, chunk, 0); | |
2735 | ||
2736 | /* | |
52f35022 | 2737 | * TODO: Synchronous, guaranteed unregistration (should not occur during |
2da776db MH |
2738 | * fast-path). Otherwise, unregisters will process on the next call to |
2739 | * qemu_rdma_drain_cq() | |
2740 | if (size < 0) { | |
2741 | qemu_rdma_unregister_waiting(rdma); | |
2742 | } | |
2743 | */ | |
2744 | } | |
2745 | ||
2746 | /* | |
2747 | * Drain the Completion Queue if possible, but do not block, | |
2748 | * just poll. | |
2749 | * | |
2750 | * If nothing to poll, the end of the iteration will do this | |
2751 | * again to make sure we don't overflow the request queue. | |
2752 | */ | |
2753 | while (1) { | |
2754 | uint64_t wr_id, wr_id_in; | |
88571882 | 2755 | int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL); |
2da776db | 2756 | if (ret < 0) { |
733252de | 2757 | error_report("rdma migration: polling error! %d", ret); |
2da776db MH |
2758 | goto err; |
2759 | } | |
2760 | ||
2761 | wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; | |
2762 | ||
2763 | if (wr_id == RDMA_WRID_NONE) { | |
2764 | break; | |
2765 | } | |
2766 | } | |
2767 | ||
2768 | return RAM_SAVE_CONTROL_DELAYED; | |
2769 | err: | |
2770 | rdma->error_state = ret; | |
2771 | return ret; | |
2772 | } | |
2773 | ||
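/*
 * A sketch of how the migration core is expected to drive the hook
 * above (a hypothetical call site, following the @offset/@size
 * contract documented before the function):
 *
 *   uint64_t bytes_sent = 0;
 *   ret = qemu_rdma_save_page(f, rfile, block_offset, offset,
 *                             TARGET_PAGE_SIZE, &bytes_sent);
 *
 * A RAM_SAVE_CONTROL_DELAYED return means the page was queued or
 * merged; the real byte count is accounted later through
 * acct_update_position() when the chunk actually flushes. Passing
 * size == 0 instead turns the call into a speculative unregistration
 * hint for the chunk containing the page.
 */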
2774 | static int qemu_rdma_accept(RDMAContext *rdma) | |
2775 | { | |
2776 | RDMACapabilities cap; | |
2777 | struct rdma_conn_param conn_param = { | |
2778 | .responder_resources = 2, | |
2779 | .private_data = &cap, | |
2780 | .private_data_len = sizeof(cap), | |
2781 | }; | |
2782 | struct rdma_cm_event *cm_event; | |
2783 | struct ibv_context *verbs; | |
2784 | int ret = -EINVAL; | |
2785 | int idx; | |
2786 | ||
2787 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2788 | if (ret) { | |
2789 | goto err_rdma_dest_wait; | |
2790 | } | |
2791 | ||
2792 | if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) { | |
2793 | rdma_ack_cm_event(cm_event); | |
2794 | goto err_rdma_dest_wait; | |
2795 | } | |
2796 | ||
2797 | memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); | |
2798 | ||
2799 | network_to_caps(&cap); | |
2800 | ||
2801 | if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) { | |
733252de | 2802 | error_report("Unknown source RDMA version: %d, bailing...", |
2da776db MH |
2803 | cap.version); |
2804 | rdma_ack_cm_event(cm_event); | |
2805 | goto err_rdma_dest_wait; | |
2806 | } | |
2807 | ||
2808 | /* | |
2809 | * Respond with only the capabilities this version of QEMU knows about. | |
2810 | */ | |
2811 | cap.flags &= known_capabilities; | |
2812 | ||
2813 | /* | |
2814 | * Enable the ones that we do know about. | |
2815 | * Add other checks here as new ones are introduced. | |
2816 | */ | |
2817 | if (cap.flags & RDMA_CAPABILITY_PIN_ALL) { | |
2818 | rdma->pin_all = true; | |
2819 | } | |
2820 | ||
2821 | rdma->cm_id = cm_event->id; | |
2822 | verbs = cm_event->id->verbs; | |
2823 | ||
2824 | rdma_ack_cm_event(cm_event); | |
2825 | ||
733252de | 2826 | trace_qemu_rdma_accept_pin_state(rdma->pin_all); |
2da776db MH |
2827 | |
2828 | caps_to_network(&cap); | |
2829 | ||
733252de | 2830 | trace_qemu_rdma_accept_pin_verbsc(verbs); |
2da776db MH |
2831 | |
2832 | if (!rdma->verbs) { | |
2833 | rdma->verbs = verbs; | |
2834 | } else if (rdma->verbs != verbs) { | |
733252de DDAG |
2835 | error_report("ibv context not matching %p, %p!", rdma->verbs, |
2836 | verbs); | |
2da776db MH |
2837 | goto err_rdma_dest_wait; |
2838 | } | |
2839 | ||
2840 | qemu_rdma_dump_id("dest_init", verbs); | |
2841 | ||
2842 | ret = qemu_rdma_alloc_pd_cq(rdma); | |
2843 | if (ret) { | |
733252de | 2844 | error_report("rdma migration: error allocating pd and cq!"); |
2da776db MH |
2845 | goto err_rdma_dest_wait; |
2846 | } | |
2847 | ||
2848 | ret = qemu_rdma_alloc_qp(rdma); | |
2849 | if (ret) { | |
733252de | 2850 | error_report("rdma migration: error allocating qp!"); |
2da776db MH |
2851 | goto err_rdma_dest_wait; |
2852 | } | |
2853 | ||
2854 | ret = qemu_rdma_init_ram_blocks(rdma); | |
2855 | if (ret) { | |
733252de | 2856 | error_report("rdma migration: error initializing ram blocks!"); |
2da776db MH |
2857 | goto err_rdma_dest_wait; |
2858 | } | |
2859 | ||
1f22364b | 2860 | for (idx = 0; idx < RDMA_WRID_MAX; idx++) { |
2da776db MH |
2861 | ret = qemu_rdma_reg_control(rdma, idx); |
2862 | if (ret) { | |
733252de | 2863 | error_report("rdma: error registering %d control", idx); |
2da776db MH |
2864 | goto err_rdma_dest_wait; |
2865 | } | |
2866 | } | |
2867 | ||
82e1cc4b | 2868 | qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL); |
2da776db MH |
2869 | |
2870 | ret = rdma_accept(rdma->cm_id, &conn_param); | |
2871 | if (ret) { | |
733252de | 2872 | error_report("rdma_accept returns %d", ret); |
2da776db MH |
2873 | goto err_rdma_dest_wait; |
2874 | } | |
2875 | ||
2876 | ret = rdma_get_cm_event(rdma->channel, &cm_event); | |
2877 | if (ret) { | |
733252de | 2878 | error_report("rdma_accept get_cm_event failed %d", ret); |
2da776db MH |
2879 | goto err_rdma_dest_wait; |
2880 | } | |
2881 | ||
2882 | if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { | |
733252de | 2883 | error_report("rdma_accept: did not receive an ESTABLISHED event"); |
2da776db MH |
2884 | rdma_ack_cm_event(cm_event); |
2885 | goto err_rdma_dest_wait; | |
2886 | } | |
2887 | ||
2888 | rdma_ack_cm_event(cm_event); | |
5a91337c | 2889 | rdma->connected = true; |
2da776db | 2890 | |
87772639 | 2891 | ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); |
2da776db | 2892 | if (ret) { |
733252de | 2893 | error_report("rdma migration: error posting second control recv"); |
2da776db MH |
2894 | goto err_rdma_dest_wait; |
2895 | } | |
2896 | ||
2897 | qemu_rdma_dump_gid("dest_connect", rdma->cm_id); | |
2898 | ||
2899 | return 0; | |
2900 | ||
2901 | err_rdma_dest_wait: | |
2902 | rdma->error_state = ret; | |
2903 | qemu_rdma_cleanup(rdma); | |
2904 | return ret; | |
2905 | } | |
2906 | ||
2907 | /* | |
2908 | * During each iteration of the migration, we listen for instructions | |
2909 | * by the source VM to perform dynamic page registrations before they | |
2910 | * can perform RDMA operations. | |
2911 | * | |
2912 | * We respond with the 'rkey'. | |
2913 | * | |
2914 | * Keep doing this until the source tells us to stop. | |
2915 | */ | |
2916 | static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque, | |
2917 | uint64_t flags) | |
2918 | { | |
2919 | RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult), | |
2920 | .type = RDMA_CONTROL_REGISTER_RESULT, | |
2921 | .repeat = 0, | |
2922 | }; | |
2923 | RDMAControlHeader unreg_resp = { .len = 0, | |
2924 | .type = RDMA_CONTROL_UNREGISTER_FINISHED, | |
2925 | .repeat = 0, | |
2926 | }; | |
2927 | RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT, | |
2928 | .repeat = 1 }; | |
2929 | QEMUFileRDMA *rfile = opaque; | |
2930 | RDMAContext *rdma = rfile->rdma; | |
2931 | RDMALocalBlocks *local = &rdma->local_ram_blocks; | |
2932 | RDMAControlHeader head; | |
2933 | RDMARegister *reg, *registers; | |
2934 | RDMACompress *comp; | |
2935 | RDMARegisterResult *reg_result; | |
2936 | static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE]; | |
2937 | RDMALocalBlock *block; | |
2938 | void *host_addr; | |
2939 | int ret = 0; | |
2940 | int idx = 0; | |
2941 | int count = 0; | |
2942 | int i = 0; | |
2943 | ||
2944 | CHECK_ERROR_STATE(); | |
2945 | ||
2946 | do { | |
733252de | 2947 | trace_qemu_rdma_registration_handle_wait(flags); |
2da776db MH |
2948 | |
2949 | ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE); | |
2950 | ||
2951 | if (ret < 0) { | |
2952 | break; | |
2953 | } | |
2954 | ||
2955 | if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) { | |
733252de DDAG |
2956 | error_report("rdma: Too many requests in this message (%d). " | |
2957 | "Bailing.", head.repeat); | |
2da776db MH |
2958 | ret = -EIO; |
2959 | break; | |
2960 | } | |
2961 | ||
2962 | switch (head.type) { | |
2963 | case RDMA_CONTROL_COMPRESS: | |
2964 | comp = (RDMACompress *) rdma->wr_data[idx].control_curr; | |
2965 | network_to_compress(comp); | |
2966 | ||
733252de DDAG |
2967 | trace_qemu_rdma_registration_handle_compress(comp->length, |
2968 | comp->block_idx, | |
2969 | comp->offset); | |
2da776db MH |
2970 | block = &(rdma->local_ram_blocks.block[comp->block_idx]); |
2971 | ||
2972 | host_addr = block->local_host_addr + | |
2973 | (comp->offset - block->offset); | |
2974 | ||
2975 | ram_handle_compressed(host_addr, comp->value, comp->length); | |
2976 | break; | |
2977 | ||
2978 | case RDMA_CONTROL_REGISTER_FINISHED: | |
733252de | 2979 | trace_qemu_rdma_registration_handle_finished(); |
2da776db MH |
2980 | goto out; |
2981 | ||
2982 | case RDMA_CONTROL_RAM_BLOCKS_REQUEST: | |
733252de | 2983 | trace_qemu_rdma_registration_handle_ram_blocks(); |
2da776db MH |
2984 | |
2985 | if (rdma->pin_all) { | |
2986 | ret = qemu_rdma_reg_whole_ram_blocks(rdma); | |
2987 | if (ret) { | |
733252de DDAG |
2988 | error_report("rdma migration: error dest " |
2989 | "registering ram blocks"); | |
2da776db MH |
2990 | goto out; |
2991 | } | |
2992 | } | |
2993 | ||
2994 | /* | |
2995 | * Dest uses this to prepare to transmit the RAMBlock descriptions | |
2996 | * to the source VM after connection setup. | |
2997 | * Both sides use the "remote" structure to communicate and update | |
2998 | * their "local" descriptions with what was sent. | |
2999 | */ | |
3000 | for (i = 0; i < local->nb_blocks; i++) { | |
a97270ad | 3001 | rdma->dest_blocks[i].remote_host_addr = |
fbce8c25 | 3002 | (uintptr_t)(local->block[i].local_host_addr); |
2da776db MH |
3003 | |
3004 | if (rdma->pin_all) { | |
a97270ad | 3005 | rdma->dest_blocks[i].remote_rkey = local->block[i].mr->rkey; |
2da776db MH |
3006 | } |
3007 | ||
a97270ad DDAG |
3008 | rdma->dest_blocks[i].offset = local->block[i].offset; |
3009 | rdma->dest_blocks[i].length = local->block[i].length; | |
2da776db | 3010 | |
a97270ad | 3011 | dest_block_to_network(&rdma->dest_blocks[i]); |
2da776db MH |
3012 | } |
3013 | ||
3014 | blocks.len = rdma->local_ram_blocks.nb_blocks | |
a97270ad | 3015 | * sizeof(RDMADestBlock); |
2da776db MH |
3016 | |
3017 | ||
3018 | ret = qemu_rdma_post_send_control(rdma, | |
a97270ad | 3019 | (uint8_t *) rdma->dest_blocks, &blocks); |
2da776db MH |
3020 | |
3021 | if (ret < 0) { | |
733252de | 3022 | error_report("rdma migration: error sending remote info"); |
2da776db MH |
3023 | goto out; |
3024 | } | |
3025 | ||
3026 | break; | |
3027 | case RDMA_CONTROL_REGISTER_REQUEST: | |
733252de | 3028 | trace_qemu_rdma_registration_handle_register(head.repeat); |
2da776db MH |
3029 | |
3030 | reg_resp.repeat = head.repeat; | |
3031 | registers = (RDMARegister *) rdma->wr_data[idx].control_curr; | |
3032 | ||
3033 | for (count = 0; count < head.repeat; count++) { | |
3034 | uint64_t chunk; | |
3035 | uint8_t *chunk_start, *chunk_end; | |
3036 | ||
3037 | reg = ®isters[count]; | |
3038 | network_to_register(reg); | |
3039 | ||
3040 | reg_result = &results[count]; | |
3041 | ||
733252de | 3042 | trace_qemu_rdma_registration_handle_register_loop(count, |
2da776db MH |
3043 | reg->current_index, reg->key.current_addr, reg->chunks); |
3044 | ||
3045 | block = &(rdma->local_ram_blocks.block[reg->current_index]); | |
                if (block->is_ram_block) {
                    host_addr = (block->local_host_addr +
                                (reg->key.current_addr - block->offset));
                    chunk = ram_chunk_index(block->local_host_addr,
                                            (uint8_t *) host_addr);
                } else {
                    chunk = reg->key.chunk;
                    host_addr = block->local_host_addr +
                        (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
                }
                chunk_start = ram_chunk_start(block, chunk);
                chunk_end = ram_chunk_end(block, chunk + reg->chunks);
                if (qemu_rdma_register_and_get_keys(rdma, block,
                            (uintptr_t)host_addr, NULL, &reg_result->rkey,
                            chunk, chunk_start, chunk_end)) {
                    error_report("cannot get rkey");
                    ret = -EINVAL;
                    goto out;
                }

                reg_result->host_addr = (uintptr_t)block->local_host_addr;

                trace_qemu_rdma_registration_handle_register_rkey(
                                                           reg_result->rkey);

                result_to_network(reg_result);
            }

            ret = qemu_rdma_post_send_control(rdma,
                            (uint8_t *) results, &reg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_UNREGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_unregister(head.repeat);
            unreg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                reg = &registers[count];
                network_to_register(reg);

                trace_qemu_rdma_registration_handle_unregister_loop(count,
                           reg->current_index, reg->key.chunk);

                block = &(rdma->local_ram_blocks.block[reg->current_index]);

                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
                block->pmr[reg->key.chunk] = NULL;
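                /*
                 * Clearing the pmr[] slot lets a later write re-register
                 * this chunk on demand (qemu_rdma_register_and_get_keys()
                 * registers a chunk only when its slot is empty).
                 */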

                if (ret != 0) {
                    perror("rdma chunk unregistration failed");
                    ret = -ret;
                    goto out;
                }

                rdma->total_registrations--;

                trace_qemu_rdma_registration_handle_unregister_success(
                                                       reg->key.chunk);
            }

            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_REGISTER_RESULT:
            error_report("Invalid RESULT message at dest.");
            ret = -EIO;
            goto out;
        default:
            error_report("Unknown control message %s", control_desc[head.type]);
            ret = -EIO;
            goto out;
        }
    } while (1);
out:
    if (ret < 0) {
        rdma->error_state = ret;
    }
    return ret;
}

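/*
 * Source side: runs before each RAM iteration. Writing RAM_SAVE_FLAG_HOOK
 * into the stream hands control to the destination's load hook
 * (qemu_rdma_registration_handle above), which services registration
 * traffic until the source sends RDMA_CONTROL_REGISTER_FINISHED in
 * qemu_rdma_registration_stop() below.
 */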
static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
                                        uint64_t flags)
{
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;

    CHECK_ERROR_STATE();

    trace_qemu_rdma_registration_start(flags);
    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
    qemu_fflush(f);

    return 0;
}

/*
 * Inform the destination that dynamic registrations are done for now.
 * First, flush any outstanding writes.
 */
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
                                       uint64_t flags)
{
    Error *local_err = NULL, **errp = &local_err;
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;
    RDMAControlHeader head = { .len = 0, .repeat = 1 };
    int ret = 0;

    CHECK_ERROR_STATE();

    qemu_fflush(f);
    ret = qemu_rdma_drain_cq(f, rdma);

    if (ret < 0) {
        goto err;
    }

    if (flags == RAM_CONTROL_SETUP) {
        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
        RDMALocalBlocks *local = &rdma->local_ram_blocks;
        int reg_result_idx, i, j, nb_dest_blocks;

        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
        trace_qemu_rdma_registration_stop_ram();

        /*
         * Make sure that we parallelize the pinning on both sides.
         * For very large guests, doing this serially takes a really
         * long time, so we have to 'interleave' the pinning locally
         * with the control messages by performing the pinning on this
         * side before we receive the control response from the other
         * side that the pinning has completed.
         */
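        /*
         * Rough timeline (illustrative only):
         *
         *   source: SEND blocks-request -> pin local blocks -> RECV result
         *   dest:   RECV blocks-request -> pin local blocks -> SEND result
         *
         * The callback passed below performs the source-side pinning while
         * the request is in flight, so both sides pin concurrently.
         */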
        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
                    &reg_result_idx, rdma->pin_all ?
                    qemu_rdma_reg_whole_ram_blocks : NULL);
        if (ret < 0) {
            ERROR(errp, "receiving remote info!");
            return ret;
        }

        nb_dest_blocks = resp.len / sizeof(RDMADestBlock);

        /*
         * The protocol uses two different sets of rkeys (mutually exclusive):
         * 1. One key to represent the virtual address of the entire ram
         *    block. (dynamic chunk registration disabled - pin everything
         *    with one rkey.)
         * 2. One to represent individual chunks within a ram block.
         *    (dynamic chunk registration enabled - pin individual chunks.)
         *
         * Once the capability is successfully negotiated, the destination
         * transmits the keys to use (or sends them later), along with the
         * virtual addresses, and then propagates the remote ram block
         * descriptions to its local copy.
         */
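        /*
         * Practically: with pin-all negotiated, each RDMADestBlock parsed
         * below already carries a usable remote_rkey; without it, rkeys are
         * obtained chunk-by-chunk via RDMA_CONTROL_REGISTER_REQUEST as
         * pages are written.
         */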

        if (local->nb_blocks != nb_dest_blocks) {
            ERROR(errp, "ram blocks mismatch #1! "
                "Your QEMU command line parameters are probably "
                "not identical on both the source and destination.");
            return -EINVAL;
        }

        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
        memcpy(rdma->dest_blocks,
            rdma->wr_data[reg_result_idx].control_curr, resp.len);
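        /*
         * Match each remote block to a local one by its ram_addr_t offset;
         * the wire format carries no block names, so (offset, length) is
         * the only identity check available here.
         */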
        for (i = 0; i < nb_dest_blocks; i++) {
            network_to_dest_block(&rdma->dest_blocks[i]);

            /* search local ram blocks */
            for (j = 0; j < local->nb_blocks; j++) {
                if (rdma->dest_blocks[i].offset != local->block[j].offset) {
                    continue;
                }

                if (rdma->dest_blocks[i].length != local->block[j].length) {
                    ERROR(errp, "ram blocks mismatch #2! "
                        "Your QEMU command line parameters are probably "
                        "not identical on both the source and destination.");
                    return -EINVAL;
                }
                local->block[j].remote_host_addr =
                        rdma->dest_blocks[i].remote_host_addr;
                local->block[j].remote_rkey = rdma->dest_blocks[i].remote_rkey;
                break;
            }

            if (j >= local->nb_blocks) {
                ERROR(errp, "ram blocks mismatch #3! "
                    "Your QEMU command line parameters are probably "
                    "not identical on both the source and destination.");
                return -EINVAL;
            }
        }
    }

    trace_qemu_rdma_registration_stop(flags);

    head.type = RDMA_CONTROL_REGISTER_FINISHED;
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);

    if (ret < 0) {
        goto err;
    }

    return 0;
err:
    rdma->error_state = ret;
    return ret;
}

static int qemu_rdma_get_fd(void *opaque)
{
    QEMUFileRDMA *rfile = opaque;
    RDMAContext *rdma = rfile->rdma;

    return rdma->comp_channel->fd;
}

static const QEMUFileOps rdma_read_ops = {
    .get_buffer = qemu_rdma_get_buffer,
    .get_fd = qemu_rdma_get_fd,
    .close = qemu_rdma_close,
    .hook_ram_load = qemu_rdma_registration_handle,
};

static const QEMUFileOps rdma_write_ops = {
    .put_buffer = qemu_rdma_put_buffer,
    .close = qemu_rdma_close,
    .before_ram_iterate = qemu_rdma_registration_start,
    .after_ram_iterate = qemu_rdma_registration_stop,
    .save_page = qemu_rdma_save_page,
};
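/*
 * Note the asymmetry: only the writer (source) side installs save_page and
 * the before/after-iteration hooks, while only the reader (destination)
 * side installs hook_ram_load, which dispatches into
 * qemu_rdma_registration_handle() above.
 */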

static void *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
{
    QEMUFileRDMA *r;

    if (qemu_file_mode_is_not_valid(mode)) {
        return NULL;
    }

    r = g_malloc0(sizeof(QEMUFileRDMA));
    r->rdma = rdma;

    if (mode[0] == 'w') {
        r->file = qemu_fopen_ops(r, &rdma_write_ops);
    } else {
        r->file = qemu_fopen_ops(r, &rdma_read_ops);
    }

    return r->file;
}
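
/*
 * The two callers below show the expected modes: the destination opens the
 * stream with "rb" once a connection is accepted; the source opens it with
 * "wb" after connecting.
 */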

static void rdma_accept_incoming_migration(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    QEMUFile *f;
    Error *local_err = NULL, **errp = &local_err;

    trace_qemu_rdma_accept_incoming_migration();
    ret = qemu_rdma_accept(rdma);

    if (ret) {
        ERROR(errp, "RDMA Migration initialization failed!");
        return;
    }

    trace_qemu_rdma_accept_incoming_migration_accepted();

    f = qemu_fopen_rdma(rdma, "rb");
    if (f == NULL) {
        ERROR(errp, "qemu_fopen_rdma failed!");
        qemu_rdma_cleanup(rdma);
        return;
    }

    rdma->migration_started_on_destination = 1;
    process_incoming_migration(f);
}

void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
    RDMAContext *rdma;
    Error *local_err = NULL;

    trace_rdma_start_incoming_migration();
    rdma = qemu_rdma_data_init(host_port, &local_err);

    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_dest_init(rdma, &local_err);

    if (ret) {
        goto err;
    }

    trace_rdma_start_incoming_migration_after_dest_init();

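    /* The second argument to rdma_listen() is the backlog of pending
     * connection requests on the CM channel. */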
    ret = rdma_listen(rdma->listen_id, 5);

    if (ret) {
        ERROR(errp, "listening on socket!");
        goto err;
    }

    trace_rdma_start_incoming_migration_after_rdma_listen();

    qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                        NULL, (void *)(intptr_t)rdma);
    return;
err:
    error_propagate(errp, local_err);
    g_free(rdma);
}

void rdma_start_outgoing_migration(void *opaque,
                            const char *host_port, Error **errp)
{
    MigrationState *s = opaque;
    Error *local_err = NULL, **temp = &local_err;
    RDMAContext *rdma = qemu_rdma_data_init(host_port, &local_err);
    int ret = 0;

    if (rdma == NULL) {
        ERROR(temp, "Failed to initialize RDMA data structures!");
        goto err;
    }

    ret = qemu_rdma_source_init(rdma, &local_err,
        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL]);

    if (ret) {
        goto err;
    }

    trace_rdma_start_outgoing_migration_after_rdma_source_init();
    ret = qemu_rdma_connect(rdma, &local_err);

    if (ret) {
        goto err;
    }

    trace_rdma_start_outgoing_migration_after_rdma_connect();

    s->file = qemu_fopen_rdma(rdma, "wb");
    migrate_fd_connect(s);
    return;
err:
    error_propagate(errp, local_err);
    g_free(rdma);
    migrate_fd_error(s);
}