]>
Commit | Line | Data |
---|---|---|
ef6d4ccd YS |
1 | /* |
2 | * QEMU paravirtual RDMA - Resource Manager Implementation | |
3 | * | |
4 | * Copyright (C) 2018 Oracle | |
5 | * Copyright (C) 2018 Red Hat Inc | |
6 | * | |
7 | * Authors: | |
8 | * Yuval Shaia <yuval.shaia@oracle.com> | |
9 | * Marcel Apfelbaum <marcel@redhat.com> | |
10 | * | |
11 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
12 | * See the COPYING file in the top-level directory. | |
13 | * | |
14 | */ | |
15 | ||
0efc9511 MT |
16 | #include "qemu/osdep.h" |
17 | #include "qapi/error.h" | |
18 | #include "cpu.h" | |
ef6d4ccd YS |
19 | |
20 | #include "rdma_utils.h" | |
21 | #include "rdma_backend.h" | |
22 | #include "rdma_rm.h" | |
23 | ||
ef6d4ccd YS |
/* Page directory and page tables */
/*
 * Number of 64-bit entries that fit in one guest page. Parenthesized so
 * the macros are usable as ordinary expressions; the previous
 * brace-enclosed form was an initializer list and would not compile in
 * expression context.
 */
#define PG_DIR_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
#define PG_TBL_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
27 | ||
28 | static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, | |
29 | uint32_t tbl_sz, uint32_t res_sz) | |
30 | { | |
31 | tbl->tbl = g_malloc(tbl_sz * res_sz); | |
32 | ||
33 | strncpy(tbl->name, name, MAX_RM_TBL_NAME); | |
34 | tbl->name[MAX_RM_TBL_NAME - 1] = 0; | |
35 | ||
36 | tbl->bitmap = bitmap_new(tbl_sz); | |
37 | tbl->tbl_sz = tbl_sz; | |
38 | tbl->res_sz = res_sz; | |
39 | qemu_mutex_init(&tbl->lock); | |
40 | } | |
41 | ||
42 | static inline void res_tbl_free(RdmaRmResTbl *tbl) | |
43 | { | |
44 | qemu_mutex_destroy(&tbl->lock); | |
45 | g_free(tbl->tbl); | |
46 | bitmap_zero_extend(tbl->bitmap, tbl->tbl_sz, 0); | |
47 | } | |
48 | ||
49 | static inline void *res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle) | |
50 | { | |
51 | pr_dbg("%s, handle=%d\n", tbl->name, handle); | |
52 | ||
53 | if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) { | |
54 | return tbl->tbl + handle * tbl->res_sz; | |
55 | } else { | |
56 | pr_dbg("Invalid handle %d\n", handle); | |
57 | return NULL; | |
58 | } | |
59 | } | |
60 | ||
61 | static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle) | |
62 | { | |
63 | qemu_mutex_lock(&tbl->lock); | |
64 | ||
65 | *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz); | |
66 | if (*handle > tbl->tbl_sz) { | |
67 | pr_dbg("Failed to alloc, bitmap is full\n"); | |
68 | qemu_mutex_unlock(&tbl->lock); | |
69 | return NULL; | |
70 | } | |
71 | ||
72 | set_bit(*handle, tbl->bitmap); | |
73 | ||
74 | qemu_mutex_unlock(&tbl->lock); | |
75 | ||
76 | memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz); | |
77 | ||
78 | pr_dbg("%s, handle=%d\n", tbl->name, *handle); | |
79 | ||
80 | return tbl->tbl + *handle * tbl->res_sz; | |
81 | } | |
82 | ||
/* Return a handle to the table's free pool */
static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    /* Out-of-range handles are silently ignored */
    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
    }

    qemu_mutex_unlock(&tbl->lock);
}
95 | ||
96 | int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
97 | uint32_t *pd_handle, uint32_t ctx_handle) | |
98 | { | |
99 | RdmaRmPD *pd; | |
100 | int ret = -ENOMEM; | |
101 | ||
102 | pd = res_tbl_alloc(&dev_res->pd_tbl, pd_handle); | |
103 | if (!pd) { | |
104 | goto out; | |
105 | } | |
106 | ||
107 | ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd); | |
108 | if (ret) { | |
109 | ret = -EIO; | |
110 | goto out_tbl_dealloc; | |
111 | } | |
112 | ||
113 | pd->ctx_handle = ctx_handle; | |
114 | ||
115 | return 0; | |
116 | ||
117 | out_tbl_dealloc: | |
118 | res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle); | |
119 | ||
120 | out: | |
121 | return ret; | |
122 | } | |
123 | ||
124 | RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) | |
125 | { | |
126 | return res_tbl_get(&dev_res->pd_tbl, pd_handle); | |
127 | } | |
128 | ||
129 | void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) | |
130 | { | |
131 | RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle); | |
132 | ||
133 | if (pd) { | |
134 | rdma_backend_destroy_pd(&pd->backend_pd); | |
135 | res_tbl_dealloc(&dev_res->pd_tbl, pd_handle); | |
136 | } | |
137 | } | |
138 | ||
139 | int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, | |
140 | uint64_t guest_start, size_t guest_length, void *host_virt, | |
141 | int access_flags, uint32_t *mr_handle, uint32_t *lkey, | |
142 | uint32_t *rkey) | |
143 | { | |
144 | RdmaRmMR *mr; | |
145 | int ret = 0; | |
146 | RdmaRmPD *pd; | |
9bbb8d35 | 147 | void *addr; |
ef6d4ccd YS |
148 | size_t length; |
149 | ||
150 | pd = rdma_rm_get_pd(dev_res, pd_handle); | |
151 | if (!pd) { | |
152 | pr_dbg("Invalid PD\n"); | |
153 | return -EINVAL; | |
154 | } | |
155 | ||
156 | mr = res_tbl_alloc(&dev_res->mr_tbl, mr_handle); | |
157 | if (!mr) { | |
158 | pr_dbg("Failed to allocate obj in table\n"); | |
159 | return -ENOMEM; | |
160 | } | |
161 | ||
162 | if (!host_virt) { | |
163 | /* TODO: This is my guess but not so sure that this needs to be | |
164 | * done */ | |
165 | length = TARGET_PAGE_SIZE; | |
9bbb8d35 | 166 | addr = g_malloc(length); |
ef6d4ccd | 167 | } else { |
7f99daad YS |
168 | mr->virt = host_virt; |
169 | pr_dbg("host_virt=0x%p\n", mr->virt); | |
170 | mr->length = guest_length; | |
6f559013 | 171 | pr_dbg("length=%zu\n", guest_length); |
7f99daad YS |
172 | mr->start = guest_start; |
173 | pr_dbg("guest_start=0x%" PRIx64 "\n", mr->start); | |
ef6d4ccd | 174 | |
7f99daad YS |
175 | length = mr->length; |
176 | addr = mr->virt; | |
ef6d4ccd YS |
177 | } |
178 | ||
179 | ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, addr, length, | |
180 | access_flags); | |
181 | if (ret) { | |
182 | pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret); | |
183 | ret = -EIO; | |
184 | goto out_dealloc_mr; | |
185 | } | |
186 | ||
187 | if (!host_virt) { | |
188 | *lkey = mr->lkey = rdma_backend_mr_lkey(&mr->backend_mr); | |
189 | *rkey = mr->rkey = rdma_backend_mr_rkey(&mr->backend_mr); | |
190 | } else { | |
191 | /* We keep mr_handle in lkey so send and recv get get mr ptr */ | |
192 | *lkey = *mr_handle; | |
193 | *rkey = -1; | |
194 | } | |
195 | ||
196 | mr->pd_handle = pd_handle; | |
197 | ||
198 | return 0; | |
199 | ||
200 | out_dealloc_mr: | |
201 | res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle); | |
202 | ||
203 | return ret; | |
204 | } | |
205 | ||
206 | RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) | |
207 | { | |
208 | return res_tbl_get(&dev_res->mr_tbl, mr_handle); | |
209 | } | |
210 | ||
/* Deregister an MR: destroy the backend MR and release the table slot */
void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        /* NOTE(review): for the !host_virt case in rdma_rm_alloc_mr(),
         * mr->virt/mr->length are left zeroed, so this is munmap(NULL, 0)
         * (a harmless failure) while the g_malloc'ed scratch page leaks.
         * munmap vs g_malloc ownership looks mismatched — TODO confirm. */
        munmap(mr->virt, mr->length);
        res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}
221 | ||
222 | int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn, | |
223 | uint32_t *uc_handle) | |
224 | { | |
225 | RdmaRmUC *uc; | |
226 | ||
227 | /* TODO: Need to make sure pfn is between bar start address and | |
228 | * bsd+RDMA_BAR2_UAR_SIZE | |
229 | if (pfn > RDMA_BAR2_UAR_SIZE) { | |
230 | pr_err("pfn out of range (%d > %d)\n", pfn, RDMA_BAR2_UAR_SIZE); | |
231 | return -ENOMEM; | |
232 | } | |
233 | */ | |
234 | ||
235 | uc = res_tbl_alloc(&dev_res->uc_tbl, uc_handle); | |
236 | if (!uc) { | |
237 | return -ENOMEM; | |
238 | } | |
239 | ||
240 | return 0; | |
241 | } | |
242 | ||
243 | RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) | |
244 | { | |
245 | return res_tbl_get(&dev_res->uc_tbl, uc_handle); | |
246 | } | |
247 | ||
248 | void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) | |
249 | { | |
250 | RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle); | |
251 | ||
252 | if (uc) { | |
253 | res_tbl_dealloc(&dev_res->uc_tbl, uc_handle); | |
254 | } | |
255 | } | |
256 | ||
257 | RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) | |
258 | { | |
259 | return res_tbl_get(&dev_res->cq_tbl, cq_handle); | |
260 | } | |
261 | ||
262 | int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, | |
263 | uint32_t cqe, uint32_t *cq_handle, void *opaque) | |
264 | { | |
265 | int rc; | |
266 | RdmaRmCQ *cq; | |
267 | ||
268 | cq = res_tbl_alloc(&dev_res->cq_tbl, cq_handle); | |
269 | if (!cq) { | |
270 | return -ENOMEM; | |
271 | } | |
272 | ||
273 | cq->opaque = opaque; | |
274 | cq->notify = false; | |
275 | ||
276 | rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe); | |
277 | if (rc) { | |
278 | rc = -EIO; | |
279 | goto out_dealloc_cq; | |
280 | } | |
281 | ||
282 | return 0; | |
283 | ||
284 | out_dealloc_cq: | |
285 | rdma_rm_dealloc_cq(dev_res, *cq_handle); | |
286 | ||
287 | return rc; | |
288 | } | |
289 | ||
290 | void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, | |
291 | bool notify) | |
292 | { | |
293 | RdmaRmCQ *cq; | |
294 | ||
295 | pr_dbg("cq_handle=%d, notify=0x%x\n", cq_handle, notify); | |
296 | ||
297 | cq = rdma_rm_get_cq(dev_res, cq_handle); | |
298 | if (!cq) { | |
299 | return; | |
300 | } | |
301 | ||
302 | cq->notify = notify; | |
303 | pr_dbg("notify=%d\n", cq->notify); | |
304 | } | |
305 | ||
306 | void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) | |
307 | { | |
308 | RdmaRmCQ *cq; | |
309 | ||
310 | cq = rdma_rm_get_cq(dev_res, cq_handle); | |
311 | if (!cq) { | |
312 | return; | |
313 | } | |
314 | ||
315 | rdma_backend_destroy_cq(&cq->backend_cq); | |
316 | ||
317 | res_tbl_dealloc(&dev_res->cq_tbl, cq_handle); | |
318 | } | |
319 | ||
320 | RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn) | |
321 | { | |
322 | GBytes *key = g_bytes_new(&qpn, sizeof(qpn)); | |
323 | ||
324 | RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key); | |
325 | ||
326 | g_bytes_unref(key); | |
327 | ||
328 | return qp; | |
329 | } | |
330 | ||
/*
 * Allocate a queue pair: validate the PD and both CQ handles, take a
 * slot in the QP table, create the backend QP, then publish it in
 * qp_hash keyed by the backend QP number (also returned in *qpn).
 * Returns 0 on success, -EINVAL for bad PD/CQ handles, -ENOMEM when the
 * QP table is full, -EIO when backend creation fails.
 */
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    uint32_t rm_qpn;

    pr_dbg("qp_type=%d\n", qp_type);

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_err("Invalid pd handle (%d)\n", pd_handle);
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        pr_err("Invalid send_cqn or recv_cqn (%d, %d)\n",
               send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    qp = res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }
    pr_dbg("rm_qpn=%d\n", rm_qpn);

    /* New QPs start in the RESET state per the IB verbs state machine */
    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq, max_send_wr,
                                max_recv_wr, max_send_sge, max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    /* Callers look QPs up by the backend QPN, not by the rm table slot */
    *qpn = rdma_backend_qpn(&qp->backend_qp);
    pr_dbg("rm_qpn=%d, backend_qpn=0x%x\n", rm_qpn, *qpn);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}
392 | ||
/*
 * Apply QP attribute changes. QP0 (SMI) is rejected with -EPERM; QP1
 * (GSI) is accepted but not forwarded to the backend. Only when
 * IBV_QP_STATE is set in attr_mask is the state transition forwarded:
 * INIT, RTR and RTS map to the corresponding backend calls. Returns 0 on
 * success, -EINVAL for a bad handle, -EIO on a backend failure.
 */
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn)
{
    RdmaRmQP *qp;
    int ret;

    pr_dbg("qpn=%d\n", qp_handle);

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    pr_dbg("qp_type=%d\n", qp->qp_type);
    pr_dbg("attr_mask=0x%x\n", attr_mask);

    if (qp->qp_type == IBV_QPT_SMI) {
        pr_dbg("QP0 unsupported\n");
        return -EPERM;
    } else if (qp->qp_type == IBV_QPT_GSI) {
        /* GSI QP is handled entirely by the device; report success */
        pr_dbg("QP1\n");
        return 0;
    }

    if (attr_mask & IBV_QP_STATE) {
        /* Record the new state, then drive the backend transition(s) */
        qp->qp_state = qp_state;
        pr_dbg("qp_state=%d\n", qp->qp_state);

        if (qp->qp_state == IBV_QPS_INIT) {
            ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
                                             qp->qp_type, qkey);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTR) {
            /* RTR needs the remote endpoint: destination GID, QPN, PSN */
            ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
                                            qp->qp_type, dgid, dqpn, rq_psn,
                                            qkey, attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTS) {
            ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
                                            sq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }
    }

    return 0;
}
453 | ||
c99f2174 YS |
454 | int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, |
455 | uint32_t qp_handle, struct ibv_qp_attr *attr, | |
456 | int attr_mask, struct ibv_qp_init_attr *init_attr) | |
457 | { | |
458 | RdmaRmQP *qp; | |
459 | ||
460 | pr_dbg("qpn=%d\n", qp_handle); | |
461 | ||
462 | qp = rdma_rm_get_qp(dev_res, qp_handle); | |
463 | if (!qp) { | |
464 | return -EINVAL; | |
465 | } | |
466 | ||
467 | pr_dbg("qp_type=%d\n", qp->qp_type); | |
468 | ||
469 | return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr); | |
470 | } | |
471 | ||
ef6d4ccd YS |
472 | void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle) |
473 | { | |
474 | RdmaRmQP *qp; | |
475 | GBytes *key; | |
476 | ||
477 | key = g_bytes_new(&qp_handle, sizeof(qp_handle)); | |
478 | qp = g_hash_table_lookup(dev_res->qp_hash, key); | |
479 | g_hash_table_remove(dev_res->qp_hash, key); | |
480 | g_bytes_unref(key); | |
481 | ||
482 | if (!qp) { | |
483 | return; | |
484 | } | |
485 | ||
486 | rdma_backend_destroy_qp(&qp->backend_qp); | |
487 | ||
488 | res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); | |
489 | } | |
490 | ||
491 | void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) | |
492 | { | |
493 | void **cqe_ctx; | |
494 | ||
495 | cqe_ctx = res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id); | |
496 | if (!cqe_ctx) { | |
497 | return NULL; | |
498 | } | |
499 | ||
500 | pr_dbg("ctx=%p\n", *cqe_ctx); | |
501 | ||
502 | return *cqe_ctx; | |
503 | } | |
504 | ||
505 | int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, | |
506 | void *ctx) | |
507 | { | |
508 | void **cqe_ctx; | |
509 | ||
510 | cqe_ctx = res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); | |
511 | if (!cqe_ctx) { | |
512 | return -ENOMEM; | |
513 | } | |
514 | ||
515 | pr_dbg("ctx=%p\n", ctx); | |
516 | *cqe_ctx = ctx; | |
517 | ||
518 | return 0; | |
519 | } | |
520 | ||
/* Release the CQE-context slot identified by cqe_ctx_id */
void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
}
525 | ||
/* GDestroyNotify for qp_hash keys: drop the GBytes reference */
static void destroy_qp_hash_key(gpointer data)
{
    g_bytes_unref(data);
}
530 | ||
/*
 * Initialize all resource tables and the backend-QPN -> QP hash; table
 * sizes come from the backend device attributes. Returns 0 on success,
 * -ENOMEM if the hash table could not be created.
 */
int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr,
                 Error **errp)
{
    dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                             destroy_qp_hash_key, NULL);
    if (!dev_res->qp_hash) {
        return -ENOMEM;
    }

    res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
    res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
    res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
    res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
    /* One CQE context per potential outstanding work request (qp * wr) */
    res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                       dev_attr->max_qp_wr, sizeof(void *));
    res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));

    return 0;
}
550 | ||
/* Tear down everything created by rdma_rm_init(), in reverse order */
void rdma_rm_fini(RdmaDeviceResources *dev_res)
{
    res_tbl_free(&dev_res->uc_tbl);
    res_tbl_free(&dev_res->cqe_ctx_tbl);
    res_tbl_free(&dev_res->qp_tbl);
    res_tbl_free(&dev_res->cq_tbl);
    res_tbl_free(&dev_res->mr_tbl);
    res_tbl_free(&dev_res->pd_tbl);
    g_hash_table_destroy(dev_res->qp_hash);
}
560 | } |