/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int srq_signature;

static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

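/*
 * Forward an SRQ event reported by mlx5_core to the consumer: map the
 * mlx5 event type to the corresponding IB event and invoke the event
 * handler registered on the ib_srq, if any.
 */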
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

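/*
 * Set up a user-space SRQ: copy the create request from udata, pin the
 * user buffer with ib_umem_get(), build the PAS (physical address) list
 * for the hardware and map the user doorbell record.
 */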
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}

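/*
 * Set up a kernel SRQ: allocate the doorbell record and the WQE buffer,
 * link all WQEs into the free list, fill the PAS array and allocate the
 * wrid array used to report completions back to the consumer.
 */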
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}

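/*
 * Create an SRQ. The WQE size is derived from the requested number of
 * scatter entries and rounded up to a power of two; the buffer is then
 * set up either from user memory or kernel memory before the SRQ is
 * created in firmware via mlx5_core_create_srq().
 */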
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_srq_attr in = {0};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	in.type = init_attr->srq_type;
	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
	if (init_attr->srq_type == IB_SRQT_XRC) {
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
	}

	in.pd = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

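/*
 * Only IB_SRQ_LIMIT is supported: arm the SRQ so that an
 * IB_EVENT_SRQ_LIMIT_REACHED event is generated when the number of
 * posted receives drops below the requested limit. Resizing
 * (IB_SRQ_MAX_WR) is rejected.
 */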
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}

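/*
 * Return a WQE to the SRQ free list once its completion has been
 * consumed; called from the CQ polling path with interrupts disabled.
 */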
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

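/*
 * Post a chain of receive WRs to the SRQ: take WQEs from the head of
 * the free list, write the scatter list for each WR and finally ring
 * the doorbell record with the updated WQE counter.
 */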
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}