// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

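/*
 * Dispatch an asynchronous event to the SRQ it belongs to. The SRQ is
 * looked up by number and pinned with a reference so that it cannot be
 * freed while the event handler runs.
 */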
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

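/* Translate a hardware SRQ event into an ib_event for the consumer's handler. */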
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

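/*
 * Mailbox command wrappers used to ask the hardware to create or
 * destroy an SRQ context.
 */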
static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

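/*
 * Allocate an SRQ number and context (SRQC) and hand the context to the
 * hardware through a mailbox command. Both the WQE buffer and the index
 * queue must already be mapped in their MTRs.
 */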
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
	u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_wqe = 0;
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
		return -ENOBUFS;
	}

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
		return -ENOBUFS;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
		goto err_xa;
	}

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

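/*
 * Destroy the hardware SRQ, then wait for concurrent event handlers to
 * drop their references before the context and SRQ number are released.
 */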
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

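/* Allocate and map the SRQ WQE buffer through a multi-hop MTR. */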
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, udata, addr);
	if (err)
		ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);

	return err;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

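/*
 * Allocate the index queue that records which WQEs have been posted to
 * the SRQ. Kernel consumers also get a bitmap for tracking free entries.
 */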
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
		return err;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
			err = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return err;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}

static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	/* srq->wrid comes from kvmalloc_array(), so release it with kvfree() */
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

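/*
 * Verb for creating an SRQ: validate the requested sizes, allocate the
 * WQE buffer, the index queue and (for kernel consumers) the wrid array,
 * then set up the hardware context.
 */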
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_srq ucmd = {};
	int ret = 0;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (ret) {
			ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
		return ret;
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
		goto err_buf_alloc;
	}

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret) {
			ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
				  ret);
			goto err_idx_alloc;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
		goto err_wrid_alloc;
	}

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;

	if (udata) {
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc_alloc;
		}
	}

	return 0;

err_srqc_alloc:
	free_srqc(hr_dev, srq);
err_wrid_alloc:
	free_srq_wrid(srq);
err_idx_alloc:
	free_srq_idx(hr_dev, srq);
err_buf_alloc:
	free_srq_buf(hr_dev, srq);
	return ret;
}

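/*
 * Verb for destroying an SRQ: tear down the hardware context first, then
 * free the backing resources.
 */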
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
	free_srq_wrid(srq);
	free_srq_buf(hr_dev, srq);
}

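/* Set up the xarray and number bitmap that track all SRQs on the device. */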
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}