]>
Commit | Line | Data |
---|---|---|
ef6d4ccd YS |
1 | /* |
2 | * QEMU paravirtual RDMA - Resource Manager Implementation | |
3 | * | |
4 | * Copyright (C) 2018 Oracle | |
5 | * Copyright (C) 2018 Red Hat Inc | |
6 | * | |
7 | * Authors: | |
8 | * Yuval Shaia <yuval.shaia@oracle.com> | |
9 | * Marcel Apfelbaum <marcel@redhat.com> | |
10 | * | |
11 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
12 | * See the COPYING file in the top-level directory. | |
13 | * | |
14 | */ | |
15 | ||
0efc9511 MT |
16 | #include "qemu/osdep.h" |
17 | #include "qapi/error.h" | |
18 | #include "cpu.h" | |
ef6d4ccd YS |
19 | |
20 | #include "rdma_utils.h" | |
21 | #include "rdma_backend.h" | |
22 | #include "rdma_rm.h" | |
23 | ||
ef6d4ccd YS |
/* Page directory and page tables */
/*
 * Number of 64-bit entries that fit in one guest page.  These must expand
 * to expressions (parentheses), not brace-enclosed initializer lists: the
 * original '{ ... }' form is not usable in arithmetic or as an array bound.
 */
#define PG_DIR_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
#define PG_TBL_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
27 | ||
28 | static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, | |
29 | uint32_t tbl_sz, uint32_t res_sz) | |
30 | { | |
31 | tbl->tbl = g_malloc(tbl_sz * res_sz); | |
32 | ||
33 | strncpy(tbl->name, name, MAX_RM_TBL_NAME); | |
34 | tbl->name[MAX_RM_TBL_NAME - 1] = 0; | |
35 | ||
36 | tbl->bitmap = bitmap_new(tbl_sz); | |
37 | tbl->tbl_sz = tbl_sz; | |
38 | tbl->res_sz = res_sz; | |
39 | qemu_mutex_init(&tbl->lock); | |
40 | } | |
41 | ||
42 | static inline void res_tbl_free(RdmaRmResTbl *tbl) | |
43 | { | |
44 | qemu_mutex_destroy(&tbl->lock); | |
45 | g_free(tbl->tbl); | |
9a3053d2 | 46 | g_free(tbl->bitmap); |
ef6d4ccd YS |
47 | } |
48 | ||
49 | static inline void *res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle) | |
50 | { | |
51 | pr_dbg("%s, handle=%d\n", tbl->name, handle); | |
52 | ||
53 | if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) { | |
54 | return tbl->tbl + handle * tbl->res_sz; | |
55 | } else { | |
56 | pr_dbg("Invalid handle %d\n", handle); | |
57 | return NULL; | |
58 | } | |
59 | } | |
60 | ||
61 | static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle) | |
62 | { | |
63 | qemu_mutex_lock(&tbl->lock); | |
64 | ||
65 | *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz); | |
66 | if (*handle > tbl->tbl_sz) { | |
67 | pr_dbg("Failed to alloc, bitmap is full\n"); | |
68 | qemu_mutex_unlock(&tbl->lock); | |
69 | return NULL; | |
70 | } | |
71 | ||
72 | set_bit(*handle, tbl->bitmap); | |
73 | ||
74 | qemu_mutex_unlock(&tbl->lock); | |
75 | ||
76 | memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz); | |
77 | ||
78 | pr_dbg("%s, handle=%d\n", tbl->name, *handle); | |
79 | ||
80 | return tbl->tbl + *handle * tbl->res_sz; | |
81 | } | |
82 | ||
83 | static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle) | |
84 | { | |
85 | pr_dbg("%s, handle=%d\n", tbl->name, handle); | |
86 | ||
87 | qemu_mutex_lock(&tbl->lock); | |
88 | ||
89 | if (handle < tbl->tbl_sz) { | |
90 | clear_bit(handle, tbl->bitmap); | |
91 | } | |
92 | ||
93 | qemu_mutex_unlock(&tbl->lock); | |
94 | } | |
95 | ||
96 | int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
97 | uint32_t *pd_handle, uint32_t ctx_handle) | |
98 | { | |
99 | RdmaRmPD *pd; | |
100 | int ret = -ENOMEM; | |
101 | ||
102 | pd = res_tbl_alloc(&dev_res->pd_tbl, pd_handle); | |
103 | if (!pd) { | |
104 | goto out; | |
105 | } | |
106 | ||
107 | ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd); | |
108 | if (ret) { | |
109 | ret = -EIO; | |
110 | goto out_tbl_dealloc; | |
111 | } | |
112 | ||
113 | pd->ctx_handle = ctx_handle; | |
114 | ||
115 | return 0; | |
116 | ||
117 | out_tbl_dealloc: | |
118 | res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle); | |
119 | ||
120 | out: | |
121 | return ret; | |
122 | } | |
123 | ||
124 | RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) | |
125 | { | |
126 | return res_tbl_get(&dev_res->pd_tbl, pd_handle); | |
127 | } | |
128 | ||
129 | void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) | |
130 | { | |
131 | RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle); | |
132 | ||
133 | if (pd) { | |
134 | rdma_backend_destroy_pd(&pd->backend_pd); | |
135 | res_tbl_dealloc(&dev_res->pd_tbl, pd_handle); | |
136 | } | |
137 | } | |
138 | ||
/*
 * Register a memory region under PD pd_handle.
 *
 * host_virt is the host mapping of the guest range [guest_start,
 * guest_start + guest_length); when it is NULL only a table slot is
 * reserved and no backend MR is created (used for zero-based / DMA MRs).
 * On success *mr_handle receives the slot index, *lkey mirrors the handle
 * (see comment below) and *rkey is set to -1 (unused).
 *
 * Returns 0, -EINVAL for a bad PD, -ENOMEM if the MR table is full or
 * -EIO if backend registration fails.
 */
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, size_t guest_length, void *host_virt,
                     int access_flags, uint32_t *mr_handle, uint32_t *lkey,
                     uint32_t *rkey)
{
    RdmaRmMR *mr;
    int ret = 0;
    RdmaRmPD *pd;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_dbg("Invalid PD\n");
        return -EINVAL;
    }

    mr = res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
    if (!mr) {
        pr_dbg("Failed to allocate obj in table\n");
        return -ENOMEM;
    }
    pr_dbg("mr_handle=%d\n", *mr_handle);

    pr_dbg("host_virt=0x%p\n", host_virt);
    pr_dbg("guest_start=0x%" PRIx64 "\n", guest_start);
    pr_dbg("length=%zu\n", guest_length);

    if (host_virt) {
        mr->virt = host_virt;
        mr->start = guest_start;
        mr->length = guest_length;
        /* Re-apply the guest's sub-page offset to the page-aligned mapping */
        mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1));

        ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
                                     mr->length, access_flags);
        if (ret) {
            pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret);
            ret = -EIO;
            goto out_dealloc_mr;
        }
    }

    /* We keep mr_handle in lkey so send and recv can get the mr ptr */
    *lkey = *mr_handle;
    *rkey = -1;

    mr->pd_handle = pd_handle;

    return 0;

out_dealloc_mr:
    res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);

    return ret;
}
193 | ||
194 | RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) | |
195 | { | |
196 | return res_tbl_get(&dev_res->mr_tbl, mr_handle); | |
197 | } | |
198 | ||
/*
 * Deregister a memory region: destroy the backend MR, unmap the host
 * mapping that was created for it, and free the table slot.
 */
void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        pr_dbg("start=0x%" PRIx64 "\n", mr->start);
        /*
         * NOTE(review): mr->start is used as "was host_virt mapped" marker;
         * an MR registered at guest address 0 would skip the munmap here —
         * confirm guest_start can never legitimately be 0.
         */
        if (mr->start) {
            /* Undo the sub-page offset added at registration time */
            mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1));
            munmap(mr->virt, mr->length);
        }
        res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}
213 | ||
214 | int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn, | |
215 | uint32_t *uc_handle) | |
216 | { | |
217 | RdmaRmUC *uc; | |
218 | ||
219 | /* TODO: Need to make sure pfn is between bar start address and | |
220 | * bsd+RDMA_BAR2_UAR_SIZE | |
221 | if (pfn > RDMA_BAR2_UAR_SIZE) { | |
222 | pr_err("pfn out of range (%d > %d)\n", pfn, RDMA_BAR2_UAR_SIZE); | |
223 | return -ENOMEM; | |
224 | } | |
225 | */ | |
226 | ||
227 | uc = res_tbl_alloc(&dev_res->uc_tbl, uc_handle); | |
228 | if (!uc) { | |
229 | return -ENOMEM; | |
230 | } | |
231 | ||
232 | return 0; | |
233 | } | |
234 | ||
235 | RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) | |
236 | { | |
237 | return res_tbl_get(&dev_res->uc_tbl, uc_handle); | |
238 | } | |
239 | ||
240 | void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) | |
241 | { | |
242 | RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle); | |
243 | ||
244 | if (uc) { | |
245 | res_tbl_dealloc(&dev_res->uc_tbl, uc_handle); | |
246 | } | |
247 | } | |
248 | ||
249 | RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) | |
250 | { | |
251 | return res_tbl_get(&dev_res->cq_tbl, cq_handle); | |
252 | } | |
253 | ||
254 | int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
255 | uint32_t cqe, uint32_t *cq_handle, void *opaque) | |
256 | { | |
257 | int rc; | |
258 | RdmaRmCQ *cq; | |
259 | ||
260 | cq = res_tbl_alloc(&dev_res->cq_tbl, cq_handle); | |
261 | if (!cq) { | |
262 | return -ENOMEM; | |
263 | } | |
264 | ||
265 | cq->opaque = opaque; | |
4082e533 | 266 | cq->notify = CNT_CLEAR; |
ef6d4ccd YS |
267 | |
268 | rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe); | |
269 | if (rc) { | |
270 | rc = -EIO; | |
271 | goto out_dealloc_cq; | |
272 | } | |
273 | ||
274 | return 0; | |
275 | ||
276 | out_dealloc_cq: | |
277 | rdma_rm_dealloc_cq(dev_res, *cq_handle); | |
278 | ||
279 | return rc; | |
280 | } | |
281 | ||
282 | void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, | |
283 | bool notify) | |
284 | { | |
285 | RdmaRmCQ *cq; | |
286 | ||
287 | pr_dbg("cq_handle=%d, notify=0x%x\n", cq_handle, notify); | |
288 | ||
289 | cq = rdma_rm_get_cq(dev_res, cq_handle); | |
290 | if (!cq) { | |
291 | return; | |
292 | } | |
293 | ||
4082e533 YS |
294 | if (cq->notify != CNT_SET) { |
295 | cq->notify = notify ? CNT_ARM : CNT_CLEAR; | |
296 | } | |
297 | ||
ef6d4ccd YS |
298 | pr_dbg("notify=%d\n", cq->notify); |
299 | } | |
300 | ||
301 | void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) | |
302 | { | |
303 | RdmaRmCQ *cq; | |
304 | ||
305 | cq = rdma_rm_get_cq(dev_res, cq_handle); | |
306 | if (!cq) { | |
307 | return; | |
308 | } | |
309 | ||
310 | rdma_backend_destroy_cq(&cq->backend_cq); | |
311 | ||
312 | res_tbl_dealloc(&dev_res->cq_tbl, cq_handle); | |
313 | } | |
314 | ||
315 | RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn) | |
316 | { | |
317 | GBytes *key = g_bytes_new(&qpn, sizeof(qpn)); | |
318 | ||
319 | RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key); | |
320 | ||
321 | g_bytes_unref(key); | |
322 | ||
323 | return qp; | |
324 | } | |
325 | ||
/*
 * Create a queue pair bound to the given PD and send/recv CQs.
 * On success *qpn receives the backend-assigned QP number and the QP is
 * inserted into qp_hash keyed by that number.
 *
 * Returns 0, -EINVAL for a bad PD or CQ handle, -ENOMEM if the QP table
 * is full, -EIO on backend failure.
 */
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    uint32_t rm_qpn;

    pr_dbg("qp_type=%d\n", qp_type);

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_err("Invalid pd handle (%d)\n", pd_handle);
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        pr_err("Invalid send_cqn or recv_cqn (%d, %d)\n",
               send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    /*
     * GSI (QP1) traffic must always raise completion notifications, so pin
     * both CQs to CNT_SET.  NOTE(review): this side effect is not reverted
     * on the failure paths below — confirm that is acceptable.
     */
    if (qp_type == IBV_QPT_GSI) {
        scq->notify = CNT_SET;
        rcq->notify = CNT_SET;
    }

    qp = res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }
    pr_dbg("rm_qpn=%d\n", rm_qpn);

    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq, max_send_wr,
                                max_recv_wr, max_send_sge, max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    /* The hash key is the backend qpn, not the internal table index */
    *qpn = rdma_backend_qpn(&qp->backend_qp);
    pr_dbg("rm_qpn=%d, backend_qpn=0x%x\n", rm_qpn, *qpn);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}
392 | ||
393 | int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
2b05705d | 394 | uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx, |
ef6d4ccd YS |
395 | union ibv_gid *dgid, uint32_t dqpn, |
396 | enum ibv_qp_state qp_state, uint32_t qkey, | |
397 | uint32_t rq_psn, uint32_t sq_psn) | |
398 | { | |
399 | RdmaRmQP *qp; | |
400 | int ret; | |
401 | ||
abc665aa | 402 | pr_dbg("qpn=0x%x\n", qp_handle); |
2b05705d | 403 | pr_dbg("qkey=0x%x\n", qkey); |
ef6d4ccd YS |
404 | |
405 | qp = rdma_rm_get_qp(dev_res, qp_handle); | |
406 | if (!qp) { | |
407 | return -EINVAL; | |
408 | } | |
409 | ||
410 | pr_dbg("qp_type=%d\n", qp->qp_type); | |
411 | pr_dbg("attr_mask=0x%x\n", attr_mask); | |
412 | ||
413 | if (qp->qp_type == IBV_QPT_SMI) { | |
414 | pr_dbg("QP0 unsupported\n"); | |
415 | return -EPERM; | |
416 | } else if (qp->qp_type == IBV_QPT_GSI) { | |
417 | pr_dbg("QP1\n"); | |
418 | return 0; | |
419 | } | |
420 | ||
421 | if (attr_mask & IBV_QP_STATE) { | |
422 | qp->qp_state = qp_state; | |
423 | pr_dbg("qp_state=%d\n", qp->qp_state); | |
424 | ||
425 | if (qp->qp_state == IBV_QPS_INIT) { | |
426 | ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp, | |
427 | qp->qp_type, qkey); | |
428 | if (ret) { | |
429 | return -EIO; | |
430 | } | |
431 | } | |
432 | ||
433 | if (qp->qp_state == IBV_QPS_RTR) { | |
2b05705d YS |
434 | /* Get backend gid index */ |
435 | pr_dbg("Guest sgid_idx=%d\n", sgid_idx); | |
436 | sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev, | |
437 | sgid_idx); | |
438 | if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */ | |
439 | pr_dbg("Fail to get bk sgid_idx for sgid_idx %d\n", sgid_idx); | |
440 | return -EIO; | |
441 | } | |
442 | ||
ef6d4ccd | 443 | ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp, |
2b05705d YS |
444 | qp->qp_type, sgid_idx, dgid, dqpn, |
445 | rq_psn, qkey, | |
446 | attr_mask & IBV_QP_QKEY); | |
ef6d4ccd YS |
447 | if (ret) { |
448 | return -EIO; | |
449 | } | |
450 | } | |
451 | ||
452 | if (qp->qp_state == IBV_QPS_RTS) { | |
453 | ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type, | |
454 | sq_psn, qkey, | |
455 | attr_mask & IBV_QP_QKEY); | |
456 | if (ret) { | |
457 | return -EIO; | |
458 | } | |
459 | } | |
460 | } | |
461 | ||
462 | return 0; | |
463 | } | |
464 | ||
c99f2174 YS |
465 | int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, |
466 | uint32_t qp_handle, struct ibv_qp_attr *attr, | |
467 | int attr_mask, struct ibv_qp_init_attr *init_attr) | |
468 | { | |
469 | RdmaRmQP *qp; | |
470 | ||
abc665aa | 471 | pr_dbg("qpn=0x%x\n", qp_handle); |
c99f2174 YS |
472 | |
473 | qp = rdma_rm_get_qp(dev_res, qp_handle); | |
474 | if (!qp) { | |
475 | return -EINVAL; | |
476 | } | |
477 | ||
478 | pr_dbg("qp_type=%d\n", qp->qp_type); | |
479 | ||
480 | return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr); | |
481 | } | |
482 | ||
ef6d4ccd YS |
483 | void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle) |
484 | { | |
485 | RdmaRmQP *qp; | |
486 | GBytes *key; | |
487 | ||
488 | key = g_bytes_new(&qp_handle, sizeof(qp_handle)); | |
489 | qp = g_hash_table_lookup(dev_res->qp_hash, key); | |
490 | g_hash_table_remove(dev_res->qp_hash, key); | |
491 | g_bytes_unref(key); | |
492 | ||
493 | if (!qp) { | |
494 | return; | |
495 | } | |
496 | ||
497 | rdma_backend_destroy_qp(&qp->backend_qp); | |
498 | ||
499 | res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); | |
500 | } | |
501 | ||
502 | void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) | |
503 | { | |
504 | void **cqe_ctx; | |
505 | ||
506 | cqe_ctx = res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id); | |
507 | if (!cqe_ctx) { | |
508 | return NULL; | |
509 | } | |
510 | ||
511 | pr_dbg("ctx=%p\n", *cqe_ctx); | |
512 | ||
513 | return *cqe_ctx; | |
514 | } | |
515 | ||
516 | int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, | |
517 | void *ctx) | |
518 | { | |
519 | void **cqe_ctx; | |
520 | ||
521 | cqe_ctx = res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); | |
522 | if (!cqe_ctx) { | |
523 | return -ENOMEM; | |
524 | } | |
525 | ||
526 | pr_dbg("ctx=%p\n", ctx); | |
527 | *cqe_ctx = ctx; | |
528 | ||
529 | return 0; | |
530 | } | |
531 | ||
532 | void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) | |
533 | { | |
534 | res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); | |
535 | } | |
536 | ||
2b05705d YS |
537 | int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, |
538 | const char *ifname, union ibv_gid *gid, int gid_idx) | |
539 | { | |
540 | int rc; | |
541 | ||
542 | rc = rdma_backend_add_gid(backend_dev, ifname, gid); | |
543 | if (rc) { | |
544 | pr_dbg("Fail to add gid\n"); | |
545 | return -EINVAL; | |
546 | } | |
547 | ||
14c74f72 | 548 | memcpy(&dev_res->port.gid_tbl[gid_idx].gid, gid, sizeof(*gid)); |
2b05705d YS |
549 | |
550 | return 0; | |
551 | } | |
552 | ||
553 | int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
554 | const char *ifname, int gid_idx) | |
555 | { | |
556 | int rc; | |
557 | ||
305fd2ba YS |
558 | if (!dev_res->port.gid_tbl[gid_idx].gid.global.interface_id) { |
559 | return 0; | |
560 | } | |
561 | ||
2b05705d | 562 | rc = rdma_backend_del_gid(backend_dev, ifname, |
14c74f72 | 563 | &dev_res->port.gid_tbl[gid_idx].gid); |
2b05705d YS |
564 | if (rc) { |
565 | pr_dbg("Fail to delete gid\n"); | |
566 | return -EINVAL; | |
567 | } | |
568 | ||
14c74f72 YS |
569 | memset(dev_res->port.gid_tbl[gid_idx].gid.raw, 0, |
570 | sizeof(dev_res->port.gid_tbl[gid_idx].gid)); | |
571 | dev_res->port.gid_tbl[gid_idx].backend_gid_index = -1; | |
2b05705d YS |
572 | |
573 | return 0; | |
574 | } | |
575 | ||
576 | int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res, | |
577 | RdmaBackendDev *backend_dev, int sgid_idx) | |
578 | { | |
579 | if (unlikely(sgid_idx < 0 || sgid_idx > MAX_PORT_GIDS)) { | |
580 | pr_dbg("Got invalid sgid_idx %d\n", sgid_idx); | |
581 | return -EINVAL; | |
582 | } | |
583 | ||
14c74f72 YS |
584 | if (unlikely(dev_res->port.gid_tbl[sgid_idx].backend_gid_index == -1)) { |
585 | dev_res->port.gid_tbl[sgid_idx].backend_gid_index = | |
2b05705d | 586 | rdma_backend_get_gid_index(backend_dev, |
14c74f72 | 587 | &dev_res->port.gid_tbl[sgid_idx].gid); |
2b05705d YS |
588 | } |
589 | ||
590 | pr_dbg("backend_gid_index=%d\n", | |
14c74f72 | 591 | dev_res->port.gid_tbl[sgid_idx].backend_gid_index); |
2b05705d | 592 | |
14c74f72 | 593 | return dev_res->port.gid_tbl[sgid_idx].backend_gid_index; |
2b05705d YS |
594 | } |
595 | ||
ef6d4ccd YS |
596 | static void destroy_qp_hash_key(gpointer data) |
597 | { | |
598 | g_bytes_unref(data); | |
599 | } | |
600 | ||
2b05705d YS |
601 | static void init_ports(RdmaDeviceResources *dev_res) |
602 | { | |
14c74f72 | 603 | int i; |
2b05705d | 604 | |
14c74f72 | 605 | memset(&dev_res->port, 0, sizeof(dev_res->port)); |
2b05705d | 606 | |
14c74f72 YS |
607 | dev_res->port.state = IBV_PORT_DOWN; |
608 | for (i = 0; i < MAX_PORT_GIDS; i++) { | |
609 | dev_res->port.gid_tbl[i].backend_gid_index = -1; | |
2b05705d YS |
610 | } |
611 | } | |
612 | ||
613 | static void fini_ports(RdmaDeviceResources *dev_res, | |
614 | RdmaBackendDev *backend_dev, const char *ifname) | |
615 | { | |
616 | int i; | |
617 | ||
14c74f72 | 618 | dev_res->port.state = IBV_PORT_DOWN; |
2b05705d YS |
619 | for (i = 0; i < MAX_PORT_GIDS; i++) { |
620 | rdma_rm_del_gid(dev_res, backend_dev, ifname, i); | |
621 | } | |
622 | } | |
623 | ||
ef6d4ccd YS |
624 | int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr, |
625 | Error **errp) | |
626 | { | |
627 | dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal, | |
628 | destroy_qp_hash_key, NULL); | |
629 | if (!dev_res->qp_hash) { | |
630 | return -ENOMEM; | |
631 | } | |
632 | ||
633 | res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD)); | |
634 | res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ)); | |
635 | res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR)); | |
636 | res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP)); | |
637 | res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp * | |
638 | dev_attr->max_qp_wr, sizeof(void *)); | |
639 | res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC)); | |
640 | ||
2b05705d YS |
641 | init_ports(dev_res); |
642 | ||
ef6d4ccd YS |
643 | return 0; |
644 | } | |
645 | ||
2b05705d YS |
646 | void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, |
647 | const char *ifname) | |
ef6d4ccd | 648 | { |
2b05705d YS |
649 | fini_ports(dev_res, backend_dev, ifname); |
650 | ||
ef6d4ccd YS |
651 | res_tbl_free(&dev_res->uc_tbl); |
652 | res_tbl_free(&dev_res->cqe_ctx_tbl); | |
653 | res_tbl_free(&dev_res->qp_tbl); | |
ef6d4ccd | 654 | res_tbl_free(&dev_res->mr_tbl); |
7131c4b0 | 655 | res_tbl_free(&dev_res->cq_tbl); |
ef6d4ccd | 656 | res_tbl_free(&dev_res->pd_tbl); |
7131c4b0 | 657 | |
ef6d4ccd YS |
658 | g_hash_table_destroy(dev_res->qp_hash); |
659 | } |