// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

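/*
 * Dispatch an asynchronous event reported by hardware to the SRQ it
 * belongs to.  The reference taken under the xarray lock keeps the SRQ
 * alive while its event handler runs; the final put completes srq->free
 * so that hns_roce_srq_free() can finish tearing the SRQ down.
 */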
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_srq_event);

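/*
 * Translate a hardware SRQ event into an ib_event and forward it to the
 * consumer's event handler, if one was registered at creation time.
 */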
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

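/*
 * Mailbox helpers: SW2HW hands a software-initialized SRQ context to the
 * hardware, HW2SW takes it back.  As the HW2SW wrapper shows, a NULL
 * mailbox selects the "don't return the old context" form of the command.
 */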
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_SW2HW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

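/*
 * Allocate the device-side state for an SRQ: look up the MTT entries for
 * the WQE and index-queue buffers, reserve an SRQN, get an HEM slot for
 * the context, publish the SRQ in the xarray, and finally write the
 * context to hardware with a SW2HW mailbox command.
 */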
int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
		       struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
		       struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_cmd_mailbox *mailbox;
	dma_addr_t dma_handle_wqe;
	dma_addr_t dma_handle_idx;
	u64 *mtts_wqe;
	u64 *mtts_idx;
	int ret;

	/* Get the physical address of srq buf */
	mtts_wqe = hns_roce_table_find(hr_dev,
				       &hr_dev->mr_table.mtt_srqwqe_table,
				       srq->mtt.first_seg,
				       &dma_handle_wqe);
	if (!mtts_wqe) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find srq buf addr.\n");
		return -EINVAL;
	}

	/* Get physical address of idx que buf */
	mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
				       srq->idx_que.mtt.first_seg,
				       &dma_handle_idx);
	if (!mtts_idx) {
		dev_err(hr_dev->dev,
			"SRQ alloc: failed to find idx que buf addr.\n");
		return -EINVAL;
	}

	/* hns_roce_bitmap_alloc() signals exhaustion with -1 */
	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret == -1) {
		dev_err(hr_dev->dev, "SRQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret)
		goto err_out;

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret)
		goto err_put;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		goto err_xa;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

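/*
 * Tear down in the reverse order of hns_roce_srq_alloc(): pull the context
 * back from hardware, unpublish the SRQ, then wait for any in-flight event
 * handlers to drop their references before releasing the HEM slot and SRQN.
 */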
void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

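/*
 * Allocate the kernel-side index queue for an SRQ: the queue buffer itself
 * plus a bitmap tracking which index-queue entries are free.
 */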
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 bitmap_num;
	int i;

	/* Bits in the bitmap, aligned up to a whole number of u64 words */
	bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));

	idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	/* Convert to a word count for the init loop below */
	bitmap_num = bitmap_num / (8 * sizeof(u64));

	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		kfree(idx_que->bitmap);
		return -ENOMEM;
	}

	/* Mark every entry as free */
	for (i = 0; i < bitmap_num; i++)
		idx_que->bitmap[i] = ~(0UL);

	return 0;
}

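/*
 * Verbs entry point for SRQ creation.  Userspace callers hand in the WQE
 * and index-queue buffers via udata and ib_umem_get(); for kernel callers
 * both buffers are allocated here.  Either way the buffers are mapped
 * through MTTs before the context is installed by hns_roce_srq_alloc().
 */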
struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
				   struct ib_srq_init_attr *srq_init_attr,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_srq *srq;
	int srq_desc_size;
	int srq_buf_size;
	u32 page_shift;
	int ret = 0;
	u32 npages;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	srq->max_gs = srq_init_attr->attr.max_sge;

	srq_desc_size = max(16, 16 * srq->max_gs);

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->max * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		struct hns_roce_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					srq_buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			ret = PTR_ERR(srq->umem);
			goto err_srq;
		}

		if (hr_dev->caps.srqwqe_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->umem) +
				  (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.srqwqe_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&srq->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(srq->umem),
						srq->umem->page_shift,
						&srq->mtt);
		}
		if (ret)
			goto err_buf;

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
		if (ret)
			goto err_srq_mtt;

		/* config index queue BA */
		srq->idx_que.umem = ib_umem_get(pd->uobject->context,
						ucmd.que_addr,
						srq->idx_que.buf_size, 0, 0);
		if (IS_ERR(srq->idx_que.umem)) {
			dev_err(hr_dev->dev,
				"ib_umem_get error for index queue\n");
			ret = PTR_ERR(srq->idx_que.umem);
			goto err_srq_mtt;
		}

		if (hr_dev->caps.idx_buf_pg_sz) {
			npages = (ib_umem_page_count(srq->idx_que.umem) +
				  (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.idx_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift, &srq->idx_que.mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(srq->idx_que.umem),
						srq->idx_que.umem->page_shift,
						&srq->idx_que.mtt);
		}

		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_mtt_init error for idx que\n");
			goto err_idx_mtt;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
						 srq->idx_que.umem);
		if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_ib_umem_write_mtt error for idx que\n");
			goto err_idx_buf;
		}
	} else {
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
				       (1 << page_shift) * 2,
				       &srq->buf, page_shift)) {
			ret = -ENOMEM;
			goto err_srq;
		}

		srq->head = 0;
		srq->tail = srq->max - 1;

		ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
					srq->buf.page_shift, &srq->mtt);
		if (ret)
			goto err_buf;

		ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
		if (ret)
			goto err_srq_mtt;

		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
		ret = hns_roce_create_idx_que(pd, srq, page_shift);
		if (ret) {
			dev_err(hr_dev->dev, "Create idx queue failed (%d)!\n",
				ret);
			goto err_srq_mtt;
		}

		/* Init mtt table for idx_que */
		ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
					srq->idx_que.idx_buf.page_shift,
					&srq->idx_que.mtt);
		if (ret)
			goto err_create_idx;

		/* Write buffer address into the mtt table */
		ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
					     &srq->idx_que.idx_buf);
		if (ret)
			goto err_idx_buf;

		srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			ret = -ENOMEM;
			goto err_idx_buf;
		}
	}

	cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
	      to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;

	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0,
				 &srq->mtt, 0, srq);
	if (ret)
		goto err_wrid;

	srq->event = hns_roce_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->srqn;

	if (pd->uobject) {
		if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
			ret = -EFAULT;
			goto err_wrid;
		}
	}

	return &srq->ibsrq;

err_wrid:
	kvfree(srq->wrid);

err_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_idx_mtt:
	if (udata)
		ib_umem_release(srq->idx_que.umem);

err_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	kfree(srq->idx_que.bitmap);

err_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_buf:
	if (udata)
		ib_umem_release(srq->umem);
	else
		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

err_srq:
	kfree(srq);
	return ERR_PTR(ret);
}

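/*
 * Verbs entry point for SRQ destruction: detach the SRQ from hardware,
 * then release the MTTs and either the user memory pins or the
 * kernel-allocated buffers.
 */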
int hns_roce_destroy_srq(struct ib_srq *ibsrq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	hns_roce_srq_free(hr_dev, srq);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

	if (ibsrq->uobject) {
		hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
		ib_umem_release(srq->idx_que.umem);
		ib_umem_release(srq->umem);
	} else {
		kvfree(srq->wrid);
		hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
				  &srq->buf);
	}

	kfree(srq);

	return 0;
}

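/*
 * Set up the per-device SRQ table: an xarray for SRQN -> SRQ lookup and a
 * bitmap that hands out SRQNs, with the reserved ones excluded.
 */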
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}