/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
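
/*
 * Completion and async event callbacks, invoked by mlx4_core out of its
 * EQ handling. They simply forward the event to whatever handlers the
 * consumer registered on the ib_cq.
 */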
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
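
/*
 * A CQE is owned by software when its ownership bit matches the parity
 * of the index's current lap around the ring; n & (cq->ibcq.cqe + 1)
 * extracts that parity bit since the ring size is a power of two.
 * get_sw_cqe() returns NULL while the entry still belongs to hardware.
 * With a 64-byte CQE stride the valid 32-byte CQE (and its ownership
 * bit) sits in the second half of the entry, hence tcqe.
 */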
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
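
/*
 * Tune event moderation: the HCA coalesces completion events until
 * cq_count CQEs have arrived or cq_period (microseconds, per the verbs
 * spec) has elapsed, whichever comes first.
 */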
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
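
/*
 * Allocate a CQE buffer in kernel memory and describe it with an MTT so
 * the HCA can DMA into it.
 */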
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
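
/*
 * Pin a user-space CQE buffer and build an MTT for it, using the
 * largest MTT page size the umem layout permits.
 */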
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
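
/*
 * Create a CQ. For a userspace CQ the CQE buffer and doorbell record
 * come from user memory described in udata; for a kernel CQ both are
 * allocated here. Either way the buffer is handed to the firmware via
 * mlx4_cq_alloc().
 */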
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), udata,
					  ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!context);
	if (err)
		goto err_dbmap;

	if (context)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
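
/*
 * Resize helpers: stage the new CQE buffer in cq->resize_buf (plus
 * cq->resize_umem for a userspace CQ) so mlx4_ib_resize_cq() can switch
 * the CQ over to it once the firmware accepts the resize.
 */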
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}
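
/*
 * Copy all CQEs still outstanding in the old buffer into the resize
 * buffer, fixing up each copy's ownership bit for its slot in the new
 * ring, until the special RESIZE CQE written by the hardware is
 * reached.
 */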
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}
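
/*
 * Resize the CQ. The requested size is rounded up to a power of two
 * and must still be able to hold all outstanding CQEs. The firmware
 * resize runs while the CQ stays live, so the buffer switch for kernel
 * CQs is serialized against the poll path by cq->lock.
 */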
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
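
/*
 * Translate the syndrome of an error CQE into the corresponding
 * ib_wc_status code, dumping the raw CQE for local QP operation errors.
 */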
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
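
/*
 * For proxy QPs (SR-IOV multi-function mode) the completion metadata of
 * the tunneled packet travels in a mlx4_ib_proxy_sqp_hdr at the start
 * of the receive buffer; unpack it into the work completion.
 */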
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof(struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}
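
/*
 * Once the device is in an internal error state the hardware will not
 * return completions anymore, so simulate IB_WC_WR_FLUSH_ERR
 * completions in software for every WQE still outstanding on the QPs
 * attached to this CQ.
 */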
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}
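
/*
 * Poll one CQE: returns 0 with *cur_qp and wc filled in, or -EAGAIN if
 * no software-owned CQE is available. The QP lookup is cached in
 * *cur_qp across calls, since consecutive CQEs often belong to the
 * same QP.
 */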
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}
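
/*
 * Poll up to num_entries completions. If the device has hit an
 * internal error, fall back to the software-generated flush
 * completions; otherwise update the consumer index doorbell record
 * once for the whole batch.
 */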
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
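
/*
 * Remove all CQEs belonging to the given QP, e.g. when the QP is moved
 * back to RESET, compacting the surviving entries toward the producer
 * side. The caller must hold cq->lock; mlx4_ib_cq_clean() below is the
 * locking wrapper.
 */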
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof(*cqe));
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}