]> git.proxmox.com Git - mirror_qemu.git/blame - hw/rdma/vmw/pvrdma_qp_ops.c
hw/rdma: Modify debug macros
[mirror_qemu.git] / hw / rdma / vmw / pvrdma_qp_ops.c
CommitLineData
98d176f8
YS
1/*
2 * QEMU paravirtual RDMA - QP implementation
3 *
4 * Copyright (C) 2018 Oracle
5 * Copyright (C) 2018 Red Hat Inc
6 *
7 * Authors:
8 * Yuval Shaia <yuval.shaia@oracle.com>
9 * Marcel Apfelbaum <marcel@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
0efc9511 16#include "qemu/osdep.h"
98d176f8
YS
17
18#include "../rdma_utils.h"
19#include "../rdma_rm.h"
20#include "../rdma_backend.h"
21
22#include "pvrdma.h"
0efc9511 23#include "standard-headers/rdma/vmw_pvrdma-abi.h"
98d176f8
YS
24#include "pvrdma_qp_ops.h"
25
/*
 * Per-WQE completion context. Allocated by pvrdma_qp_send/pvrdma_qp_recv
 * when a work request is handed to the backend; the completion handler
 * fills in status/vendor_err, posts the CQE, and frees it.
 */
typedef struct CompHandlerCtx {
    PVRDMADev *dev;        /* device owning the CQ rings */
    uint32_t cq_handle;    /* CQ the completion is delivered on */
    struct pvrdma_cqe cqe; /* pre-filled CQE (wr_id/qp/opcode set at post time) */
} CompHandlerCtx;
31
32/* Send Queue WQE */
33typedef struct PvrdmaSqWqe {
34 struct pvrdma_sq_wqe_hdr hdr;
35 struct pvrdma_sge sge[0];
36} PvrdmaSqWqe;
37
38/* Recv Queue WQE */
39typedef struct PvrdmaRqWqe {
40 struct pvrdma_rq_wqe_hdr hdr;
41 struct pvrdma_sge sge[0];
42} PvrdmaRqWqe;
43
44/*
45 * 1. Put CQE on send CQ ring
46 * 2. Put CQ number on dsr completion ring
47 * 3. Interrupt host
48 */
49static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle,
50 struct pvrdma_cqe *cqe)
51{
52 struct pvrdma_cqe *cqe1;
53 struct pvrdma_cqne *cqne;
54 PvrdmaRing *ring;
55 RdmaRmCQ *cq = rdma_rm_get_cq(&dev->rdma_dev_res, cq_handle);
56
57 if (unlikely(!cq)) {
58 pr_dbg("Invalid cqn %d\n", cq_handle);
59 return -EINVAL;
60 }
61
62 ring = (PvrdmaRing *)cq->opaque;
63 pr_dbg("ring=%p\n", ring);
64
65 /* Step #1: Put CQE on CQ ring */
66 pr_dbg("Writing CQE\n");
67 cqe1 = pvrdma_ring_next_elem_write(ring);
68 if (unlikely(!cqe1)) {
69 return -EINVAL;
70 }
71
72 cqe1->wr_id = cqe->wr_id;
73 cqe1->qp = cqe->qp;
74 cqe1->opcode = cqe->opcode;
75 cqe1->status = cqe->status;
76 cqe1->vendor_err = cqe->vendor_err;
77
78 pvrdma_ring_write_inc(ring);
79
80 /* Step #2: Put CQ number on dsr completion ring */
81 pr_dbg("Writing CQNE\n");
82 cqne = pvrdma_ring_next_elem_write(&dev->dsr_info.cq);
83 if (unlikely(!cqne)) {
84 return -EINVAL;
85 }
86
87 cqne->info = cq_handle;
88 pvrdma_ring_write_inc(&dev->dsr_info.cq);
89
90 pr_dbg("cq->notify=%d\n", cq->notify);
91 if (cq->notify) {
92 cq->notify = false;
93 post_interrupt(dev, INTR_VEC_CMD_COMPLETION_Q);
94 }
95
96 return 0;
97}
98
99static void pvrdma_qp_ops_comp_handler(int status, unsigned int vendor_err,
100 void *ctx)
101{
102 CompHandlerCtx *comp_ctx = (CompHandlerCtx *)ctx;
103
104 pr_dbg("cq_handle=%d\n", comp_ctx->cq_handle);
6f559013 105 pr_dbg("wr_id=%" PRIx64 "\n", comp_ctx->cqe.wr_id);
98d176f8
YS
106 pr_dbg("status=%d\n", status);
107 pr_dbg("vendor_err=0x%x\n", vendor_err);
108 comp_ctx->cqe.status = status;
109 comp_ctx->cqe.vendor_err = vendor_err;
110 pvrdma_post_cqe(comp_ctx->dev, comp_ctx->cq_handle, &comp_ctx->cqe);
111 g_free(ctx);
112}
113
/* Tear down QP ops: detach the completion handler from the backend. */
void pvrdma_qp_ops_fini(void)
{
    rdma_backend_unregister_comp_handler();
}
118
/*
 * Initialize QP ops: register our completion handler with the backend.
 * Always returns 0 (registration has no failure path here).
 */
int pvrdma_qp_ops_init(void)
{
    rdma_backend_register_comp_handler(pvrdma_qp_ops_comp_handler);

    return 0;
}
125
126int pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle)
127{
128 RdmaRmQP *qp;
129 PvrdmaSqWqe *wqe;
130 PvrdmaRing *ring;
131
132 pr_dbg("qp_handle=%d\n", qp_handle);
133
134 qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle);
135 if (unlikely(!qp)) {
136 return -EINVAL;
137 }
138
139 ring = (PvrdmaRing *)qp->opaque;
140 pr_dbg("sring=%p\n", ring);
141
142 wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring);
143 while (wqe) {
144 CompHandlerCtx *comp_ctx;
145
6f559013 146 pr_dbg("wr_id=%" PRIx64 "\n", wqe->hdr.wr_id);
98d176f8
YS
147
148 /* Prepare CQE */
149 comp_ctx = g_malloc(sizeof(CompHandlerCtx));
150 comp_ctx->dev = dev;
151 comp_ctx->cq_handle = qp->send_cq_handle;
152 comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
153 comp_ctx->cqe.qp = qp_handle;
154 comp_ctx->cqe.opcode = wqe->hdr.opcode;
155
156 rdma_backend_post_send(&dev->backend_dev, &qp->backend_qp, qp->qp_type,
157 (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge,
158 (union ibv_gid *)wqe->hdr.wr.ud.av.dgid,
159 wqe->hdr.wr.ud.remote_qpn,
160 wqe->hdr.wr.ud.remote_qkey, comp_ctx);
161
162 pvrdma_ring_read_inc(ring);
163
164 wqe = pvrdma_ring_next_elem_read(ring);
165 }
166
167 return 0;
168}
169
170int pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle)
171{
172 RdmaRmQP *qp;
173 PvrdmaRqWqe *wqe;
174 PvrdmaRing *ring;
175
176 pr_dbg("qp_handle=%d\n", qp_handle);
177
178 qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle);
179 if (unlikely(!qp)) {
180 return -EINVAL;
181 }
182
183 ring = &((PvrdmaRing *)qp->opaque)[1];
184 pr_dbg("rring=%p\n", ring);
185
186 wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
187 while (wqe) {
188 CompHandlerCtx *comp_ctx;
189
6f559013 190 pr_dbg("wr_id=%" PRIx64 "\n", wqe->hdr.wr_id);
98d176f8
YS
191
192 /* Prepare CQE */
193 comp_ctx = g_malloc(sizeof(CompHandlerCtx));
194 comp_ctx->dev = dev;
195 comp_ctx->cq_handle = qp->recv_cq_handle;
196 comp_ctx->cqe.qp = qp_handle;
197 comp_ctx->cqe.wr_id = wqe->hdr.wr_id;
198
199 rdma_backend_post_recv(&dev->backend_dev, &dev->rdma_dev_res,
200 &qp->backend_qp, qp->qp_type,
201 (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge,
202 comp_ctx);
203
204 pvrdma_ring_read_inc(ring);
205
206 wqe = pvrdma_ring_next_elem_read(ring);
207 }
208
209 return 0;
210}
211
212void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle)
213{
214 RdmaRmCQ *cq;
215
216 cq = rdma_rm_get_cq(dev_res, cq_handle);
217 if (!cq) {
218 pr_dbg("Invalid CQ# %d\n", cq_handle);
b0197cf8 219 return;
98d176f8
YS
220 }
221
222 rdma_backend_poll_cq(dev_res, &cq->backend_cq);
223}