/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)

typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        atomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
                                                    recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

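/*
 * Drain all completions currently available on the given ibv CQ: each one is
 * handed to the registered completion handler, removed from the owning QP or
 * SRQ context list, and its CQE context is released.  Returns the number of
 * completions processed.
 */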
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            if (bctx->backend_qp) {
                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
            } else {
                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
            }

            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

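/*
 * Completion-channel thread: polls the (non-blocking) completion channel fd
 * with a timeout so the thread can be stopped, then for every CQ event
 * re-arms the notification, drains the CQ via rdma_poll_cq() and acks the
 * event.
 */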
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}

static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}

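/*
 * Synchronous request/response path to the rdmacm-mux chardev:
 * rdmacm_mux_send() writes a request and then rdmacm_mux_check_op_status()
 * reads back the mux's status reply.  Async delivery is disabled for the
 * duration of the exchange so the reply is not consumed by the read handler.
 */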
static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global     = 1,
            .port_num      = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge, uint64_t *total_length)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

#ifdef LEGACY_RDMA_REG_MR
        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
#else
        dsge->addr = ssge[ssge_idx].addr;
#endif
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += dsge->length;

        dsge++;
    }

    return 0;
}

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

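/*
 * Send a guest GSI (QP1) MAD to the external rdmacm-mux service: the two
 * guest SGEs (GRH header and MAD payload) are copied into a single
 * RdmaCmMuxMsg and pushed out through rdmacm_mux_send().
 */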
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

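/*
 * Post a send work request on the backend QP.  QP0 requests are rejected and
 * QP1 (GSI) traffic is diverted to mad_send(); for real QPs the guest SGEs
 * are translated to host addresses, a UD address handle is taken from the AH
 * cache when needed, and the WR is handed to ibv_post_send() with the CQE
 * context id as wr_id.
 */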
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

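/*
 * A MAD receive buffer posted by the guest on QP1 is not handed to the host
 * device; instead its single SGE is saved on recv_mads_list until a MAD
 * arrives from the rdmacm-mux service.
 */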
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

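/*
 * Same flow as rdma_backend_post_recv(), but for a shared receive queue: the
 * CQE context is tracked on the SRQ's cqe_ctx_list and the buffer is posted
 * with ibv_post_srq_recv().
 */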
void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

#ifdef LEGACY_RDMA_REG_MR
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
#else
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, uint64_t guest_start, int access)
#endif
{
#ifdef LEGACY_RDMA_REG_MR
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
#else
    mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access);
#endif
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

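/*
 * Create the host ibv QP backing a guest QP.  GSI QPs have no backend object
 * (their MAD traffic goes through the rdmacm-mux path), only RC and UD types
 * are supported, and an optional SRQ may be attached at creation time.
 */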
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

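/*
 * The next three helpers move the backend QP through the verbs state machine
 * (INIT -> RTR -> RTS), filling in the attribute mask each transition
 * requires for RC and UD queue pairs.
 */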
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout = 14;
        attr.retry_cnt = 7;
        attr.rnr_retry = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

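/*
 * Clamp each capability requested by the device model to what the host IB
 * device reports, warning whenever a requested value had to be lowered.
 */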
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}

static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

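/*
 * Deliver a MAD that arrived from the rdmacm-mux service to the guest: pop a
 * previously saved QP1 receive buffer, build a GRH in front of the MAD
 * payload and complete the pending receive work request.
 */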
static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}

int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

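/*
 * Open the backing ibverbs device (selected by name, or the first device
 * found), create its completion channel, clamp the advertised capabilities
 * and connect the MAD multiplexer.  The completion thread itself is started
 * later by rdma_backend_start().
 */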
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }

    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

out:
    return ret;
}

void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}