/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

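/*
 * Post a FW_RI_RES_WR with op RESET to have the firmware tear down
 * the hardware CQ context, wait for the reply, then free the host
 * resources (software queue, DMA memory, cqid).
 */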
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

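/*
 * Allocate a cqid and the host queue memory, then post a FW_RI_RES_WR
 * with op WRITE to hand the queue to the firmware. On success the
 * BAR2 doorbell address for the new CQ is also resolved.
 */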
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

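/*
 * Insert a software-generated flush CQE for one RQ entry into the
 * software CQ.
 */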
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

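/*
 * Generate flush completions for all RQ entries still in use, less
 * the ones the caller has already accounted for via count.
 */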
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

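/*
 * Insert a software-generated flush CQE for one SQ entry into the
 * software CQ.
 */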
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

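/*
 * Walk the SQ from flush_cidx to pidx, generating a flush completion
 * for each pending WR and advancing the oldest-read pointer past any
 * flushed read request.
 */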
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

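/*
 * Move completed, now in-order CQEs for signaled WRs from the SW SQ
 * into the SW CQ, stopping at the first signaled WR that has not yet
 * completed.
 */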
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

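/*
 * Build a synthetic read-request CQE from a hardware read-response
 * CQE and the oldest outstanding read WR, since the hardware CQE
 * lacks the SQ index, opcode, and length of the original request.
 */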
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

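/*
 * Advance oldest_read to the next outstanding read request in the SQ,
 * or NULL if there is none.
 */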
static void advance_oldest_read(struct t4_wq *wq)
{

	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

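/*
 * Decide whether a CQE completes one of this WQ's WRs; CQEs the
 * driver generates or consumes internally do not.
 */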
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

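/*
 * Count the RQ completions in the software CQ that belong to this WQ.
 */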
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0             CQE returned ok.
 *     -EAGAIN       CQE skipped, try again.
 *     -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion. In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one. So adjust in_use based on this delta.
		 * if this is not completing any unsigned wrs, then the
		 * delta will be 0. Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0		cqe returned
 *	-ENODATA	EMPTY
 *	-EAGAIN		caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;

			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq) {
		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
			if (t4_sq_empty(wq))
				complete(&qhp->sq_drained);
			if (t4_rq_empty(wq))
				complete(&qhp->rq_drained);
		}
		spin_unlock(&qhp->lock);
	}
	return ret;
}

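/*
 * Poll up to num_entries completions, retrying internally whenever
 * poll_cq() skips a CQE (-EAGAIN).
 */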
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

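/*
 * Remove the CQ from the idr, wait for all references to drop, then
 * tear down the hardware queue via destroy_cq().
 */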
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb);
	chp->destroy_skb = NULL;
	kfree(chp);
	return 0;
}

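/*
 * Verb entry point: size the queue (status page, full-vs-empty slack,
 * 2x headroom, 64-entry minimum, multiple of 16), create the hardware
 * CQ, and for user CQs export the queue and GTS doorbell via mmap keys.
 */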
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err1;
	}

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err2;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err3;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err4;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err5;
		}

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err6;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err6:
	kfree(mm2);
err5:
	kfree(mm);
err4:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb);
err2:
	kfree_skb(chp->destroy_skb);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

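/* CQ resize is not supported. */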
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

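/*
 * Re-arm the CQ for notification; with IB_CQ_REPORT_MISSED_EVENTS,
 * report whether CQEs are already pending.
 */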
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}