/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int srq_signature;

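/* Return a pointer to receive WQE number @n within the SRQ buffer. */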
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

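/*
 * Translate an asynchronous SRQ event reported by the mlx5 core (limit
 * reached or catastrophic error) into an ib_event and dispatch it to the
 * consumer's event handler, if one is registered.
 */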
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

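/*
 * Set up an SRQ whose buffer lives in userspace: copy the create command
 * from udata, pin the user buffer with ib_umem_get(), translate it into
 * the physical address (PAS) list of the create mailbox, and map the
 * user-supplied doorbell record.
 */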
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}
	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	return 0;

err_in:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}

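/*
 * Set up an SRQ owned by the kernel: allocate a doorbell record and a DMA
 * buffer, chain every WQE into a singly linked free list through its
 * next_wqe_index field, fill the PAS list of the create mailbox, and
 * allocate the wrid array used to track posted work requests.
 */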
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(&dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	*srq->db.db = 0;

	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	return 0;

err_in:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(&dev->mdev, &srq->db);
	return err;
}

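/* Tear down the resources set up by create_srq_user()/create_srq_kernel(). */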
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(&dev->mdev, &srq->buf);
	mlx5_db_free(&dev->mdev, &srq->db);
}

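/*
 * Create an SRQ: validate the requested size against device capabilities,
 * round the WQE count and descriptor size up to powers of two, build the
 * buffer via the user or kernel path, fill in the SRQ context (log sizes,
 * wqe_shift/XRC/signature flags, XRC domain and CQ number, PD, doorbell
 * record) and issue the firmware create command.  For user SRQs the new
 * SRQ number is copied back through udata on success.
 */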
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_create_srq_mbox_in *uninitialized_var(in);
	int uninitialized_var(inlen);
	int is_xrc;
	u32 flgs, xrcdn;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    dev->mdev.caps.max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
	in->ctx.state_log_sz = ilog2(srq->msrq.max);
	flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
	xrcdn = 0;
	if (is_xrc) {
		xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
	}

	in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));

	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->ctx.db_record = cpu_to_be64(srq->db.dma);
	err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
	mlx5_vfree(in);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

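/*
 * Only arming the SRQ limit (IB_SRQ_LIMIT) is supported; resizing
 * (IB_SRQ_MAX_WR) is rejected with -EINVAL.
 */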
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

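/*
 * Query the SRQ context from firmware and report the current limit
 * watermark (lwm) along with the software-tracked WR and SGE maximums.
 */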
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_query_srq_mbox_out *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

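/* Destroy the SRQ in firmware, then release user or kernel resources. */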
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}

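/*
 * Return a completed WQE to the tail of the SRQ free list.  Called from
 * CQ processing when a receive completion on this SRQ is handled.
 */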
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

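/*
 * Post a chain of receive work requests: pop WQEs from the head of the
 * free list, write the scatter entries (terminated by an invalid-lkey
 * sentinel when fewer than max_avail_gather are used), then advance the
 * doorbell record after a write barrier.  head == tail means the free
 * list is exhausted, i.e. the SRQ is full.
 */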
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}