/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>
#include <infiniband/umad_types.h>
#include <infiniband/umad.h>
#include <rdma/rdma_user_cm.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"
#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)
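/*
 * Per-work-request context: one BackendCtx is allocated for every posted WR,
 * registered in the device's cqe-ctx map (keyed by wr_id) and released when
 * the matching completion is consumed. For GSI receives, sge records the
 * guest buffer that an incoming MAD will be copied into.
 */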
typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};
static void (*comp_handler)(void *ctx, struct ibv_wc *wc);
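/*
 * All completions are delivered through this single registered callback;
 * until the device model registers one, the dummy handler below just
 * reports that no handler is installed.
 */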
static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}
static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {0};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}
static void rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne;
    BackendCtx *bctx;
    struct ibv_wc wc[2];

    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
    } while (ne > 0);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }
}
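/*
 * Completion-channel event loop. The channel fd is made non-blocking and
 * polled with a timeout so that the thread can observe a stop request
 * (comp_thread.run cleared) even when no completion events arrive.
 */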
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);
            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    /* TODO: Post cqe for all remaining buffs that were posted */

    backend_dev->comp_thread.is_running = false;

    return NULL;
}
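/*
 * Flow control toward the rdmacm-mux chardev: can_receive is non-zero only
 * when a full RdmaCmMuxMsg may be accepted, and is cleared while a
 * synchronous request/response exchange is in flight (see rdmacm_mux_send).
 */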
static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}
static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}
static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}
static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}
static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {0};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}
void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}
int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    rdma_poll_cq(rdma_dev_res, cq->ibcq);
}
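/*
 * Address handles are cached per destination GID so that repeated UD sends
 * to the same destination reuse a single ibv_ah instead of creating one
 * for every work request.
 */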
static GHashTable *ah_hash;
static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global     = 1,
            .port_num      = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}
static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}
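/*
 * Translate a guest SGE list into a host SGE list: each lkey is resolved
 * through the resource manager and the guest address is rebased onto the
 * host mapping of the MR before the list is handed to libibverbs.
 */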
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        dsge++;
    }

    return 0;
}
static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[5];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}
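/*
 * MADs are not posted on a backend QP. They are wrapped in an RdmaCmMuxMsg
 * and forwarded to the external rdmacm-mux process, which owns the umad
 * interface of the host device.
 */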
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}
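/*
 * QP0 and QP1 have no backing ibv QP (qp->ibqp is NULL): QP0 requests are
 * rejected, and QP1 (GSI) sends are diverted to mad_send() with the work
 * completed immediately. All other QPs get the SGE list translated and the
 * WR posted with wr_id pointing at the allocated BackendCtx.
 */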
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {0}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto out_free_bctx;
    }

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto out_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto out_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto out_dealloc_cqe_ctx;
    }

    return;

out_dealloc_cqe_ctx:
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

out_free_bctx:
    g_free(bctx);
}
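/*
 * GSI receive buffers are not posted to the backend either; the single SGE
 * is validated, stashed in a BackendCtx and queued on recv_mads_list until
 * a MAD arrives from the mux (see process_incoming_mad_req).
 */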
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}
void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaDeviceResources *rdma_dev_res,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {0}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;

    rc = rdma_rm_alloc_cqe_ctx(rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto out_free_bctx;
    }

    rc = build_host_sge_array(rdma_dev_res, new_sge, sge, num_sge);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto out_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto out_dealloc_cqe_ctx;
    }

    return;

out_dealloc_cqe_ctx:
    rdma_rm_dealloc_cqe_ctx(rdma_dev_res, bctx_id);

out_free_bctx:
    g_free(bctx);
}
int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}
void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
{
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}
void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}
int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}
void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, uint32_t max_send_wr,
                           uint32_t max_recv_wr, uint32_t max_send_sge,
                           uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {0};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        qp->ibpd = pd->ibpd;
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}
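/*
 * The helpers below drive the usual RESET -> INIT -> RTR -> RTS transition
 * via ibv_modify_qp(), building the attribute mask per QP type (RC vs UD)
 * just as a native verbs application would.
 */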
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {0};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {0};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu               = IBV_MTU_1024;
        attr.dest_qp_num            = dqpn;
        attr.max_dest_rd_atomic     = 1;
        attr.min_rnr_timer          = 12;
        attr.ah_attr.port_num       = backend_dev->port_num;
        attr.ah_attr.is_global      = 1;
        attr.ah_attr.grh.hop_limit  = 1;
        attr.ah_attr.grh.dgid       = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn                 = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {0};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout       = 14;
        attr.retry_cnt     = 7;
        attr.rnr_retry     = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}
void rdma_backend_destroy_qp(RdmaBackendQP *qp)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
}
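/*
 * Clamp a requested device attribute to the host device's capability,
 * warning when the requested value has to be lowered.
 */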
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})
static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");

    return 0;
}
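/*
 * Build the GRH that precedes a received MAD in the guest buffer: the
 * sender's GID, our GID and the payload length in network byte order.
 */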
static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}
static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {0};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}
static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}
static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }

    process_incoming_mad_req(backend_dev, msg);
}
static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}
static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}
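/*
 * Find the index of a given GID in the device's GID table by scanning with
 * ibv_query_gid() until a match (or the end of the table) is reached.
 */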
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}
int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}
int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}
*backend_dev
, PCIDevice
*pdev
,
1081 RdmaDeviceResources
*rdma_dev_res
,
1082 const char *backend_device_name
, uint8_t port_num
,
1083 struct ibv_device_attr
*dev_attr
, CharBackend
*mad_chr_be
)
1087 int num_ibv_devices
;
1088 struct ibv_device
**dev_list
;
1090 memset(backend_dev
, 0, sizeof(*backend_dev
));
1092 backend_dev
->dev
= pdev
;
1093 backend_dev
->port_num
= port_num
;
1094 backend_dev
->rdma_dev_res
= rdma_dev_res
;
1096 rdma_backend_register_comp_handler(dummy_comp_handler
);
1098 dev_list
= ibv_get_device_list(&num_ibv_devices
);
1100 rdma_error_report("Failed to get IB devices list");
1104 if (num_ibv_devices
== 0) {
1105 rdma_error_report("No IB devices were found");
1107 goto out_free_dev_list
;
1110 if (backend_device_name
) {
1111 for (i
= 0; dev_list
[i
]; ++i
) {
1112 if (!strcmp(ibv_get_device_name(dev_list
[i
]),
1113 backend_device_name
)) {
1118 backend_dev
->ib_dev
= dev_list
[i
];
1119 if (!backend_dev
->ib_dev
) {
1120 rdma_error_report("Failed to find IB device %s",
1121 backend_device_name
);
1123 goto out_free_dev_list
;
1126 backend_dev
->ib_dev
= *dev_list
;
1129 rdma_info_report("uverb device %s", backend_dev
->ib_dev
->dev_name
);
1131 backend_dev
->context
= ibv_open_device(backend_dev
->ib_dev
);
1132 if (!backend_dev
->context
) {
1133 rdma_error_report("Failed to open IB device %s",
1134 ibv_get_device_name(backend_dev
->ib_dev
));
1139 backend_dev
->channel
= ibv_create_comp_channel(backend_dev
->context
);
1140 if (!backend_dev
->channel
) {
1141 rdma_error_report("Failed to create IB communication channel");
1143 goto out_close_device
;
1146 ret
= init_device_caps(backend_dev
, dev_attr
);
1148 rdma_error_report("Failed to initialize device capabilities");
1150 goto out_destroy_comm_channel
;
1154 ret
= mad_init(backend_dev
, mad_chr_be
);
1156 rdma_error_report("Failed to initialize mad");
1158 goto out_destroy_comm_channel
;
1161 backend_dev
->comp_thread
.run
= false;
1162 backend_dev
->comp_thread
.is_running
= false;
1166 goto out_free_dev_list
;
1168 out_destroy_comm_channel
:
1169 ibv_destroy_comp_channel(backend_dev
->channel
);
1172 ibv_close_device(backend_dev
->context
);
1175 ibv_free_device_list(dev_list
);
void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}
void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    stop_backend_thread(&backend_dev->comp_thread);
}
void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    rdma_backend_stop(backend_dev);
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}