/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

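/*
 * Return the CQE at index @n if software owns it (it is valid and its
 * ownership bit matches the current pass over the CQ ring), or NULL if
 * hardware still owns it.
 */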
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

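/*
 * Translate the opcode of a successful send completion (taken from the
 * top byte of sop_drop_qpn in the CQE) into the matching ib_wc opcode,
 * flags and, where applicable, byte count.
 */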
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

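/*
 * Fill in a work completion for a receive CQE: recover the WR ID from the
 * SRQ or RQ, set the opcode, immediate/invalidate data and addressing
 * fields, and, for RoCE ports, the network header type.
 */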
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid = be16_to_cpu(cqe->slid);
	wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET)
		return;

	switch (wc->sl & 0x3) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

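/*
 * Decode a signature error CQE into an ib_sig_err item: classify the
 * failure as a guard, reference-tag or application-tag mismatch and
 * record the expected and actual values, the error offset and the
 * memory key.
 */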
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * (flush error) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}

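/*
 * Poll a single hardware CQE: advance the consumer index, look up the
 * owning QP, and translate the CQE into an ib_wc.  Returns 0 when a
 * completion was filled in and -EAGAIN when no software-owned CQE is
 * available.  Resize CQEs (when a resize buffer is pending) and
 * signature-error CQEs are handled internally and polling is retried.
 */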
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		goto repoll;
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

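/*
 * ib_poll_cq entry point: drain any software-generated completions first,
 * then poll hardware CQEs up to @num_entries.  If the device is in
 * internal error state, flush-error completions are generated in software
 * for all outstanding WQEs instead.
 */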
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page,
		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
		    to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd;
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = to_mucontext(context)->uuari.uars[0].index;

	return 0;

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

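/*
 * ib_create_cq entry point: validate the requested size and flags,
 * allocate the CQ buffer and doorbell (in user memory for userspace CQs,
 * in kernel memory otherwise), build the CREATE_CQ command and hand the
 * CQ to firmware.
 */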
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

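/*
 * After a kernel CQ resize, copy the CQEs still pending in the old buffer
 * into the new (resize) buffer, fixing up the ownership bit for each
 * destination slot, until the RESIZE_CQ CQE is reached.
 */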
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}