/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

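/*
 * Post a FW_RI_RES_WR RESET work request to free the hardware CQ, wait
 * for the firmware reply, then release the host-side resources
 * (software queue, DMA ring, and CQID).
 */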
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

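/*
 * Allocate a CQID and the host memory for the CQ (a kernel software
 * queue plus the DMA-coherent hardware ring), then post a FW_RI_RES_WR
 * WRITE work request to create the ingress queue in hardware and map
 * its BAR2 doorbell/GTS address.
 */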
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

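/*
 * Insert a software-generated flush CQE (T4_ERR_SWFLUSH, RQ type) for
 * one pending receive WR into the software CQ.
 */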
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

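/*
 * Insert a software-generated flush CQE (T4_ERR_SWFLUSH, SQ type) for
 * one pending send WR into the software CQ, preserving the WR's opcode
 * and SQ index.
 */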
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

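/*
 * Walk the software SQ from flush_cidx to pidx and insert a flush CQE
 * for every WR that has not yet completed.  Returns the number of WRs
 * flushed.
 */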
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

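/*
 * Move signaled SQ WRs that have already completed (possibly out of
 * order) into the software CQ, now that they are in order.  Stop at
 * the first signaled WR that has not completed yet.
 */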
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

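/*
 * Advance sq.oldest_read to the next READ_REQ WR in the software SQ,
 * or set it to NULL if there is no outstanding read request.
 */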
static void advance_oldest_read(struct t4_wq *wq)
{

	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's an SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

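/*
 * Return 1 if this CQE completes an actual WR; return 0 for CQEs that
 * consume no WR (TERMINATE, incoming RDMA WRITE data, SQ-type read
 * responses, and SENDs seen while the RQ is already empty).
 */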
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0            CQE returned ok.
 *     -EAGAIN      CQE skipped, try again.
 *     -EOVERFLOW   CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Special cqe for drain WR completions...
	 */
	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ. This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled WRs, then the
		 * delta will be 0.  Handle wrapping also!  For example,
		 * with sq.size = 16, cidx = 14 and idx = 2, in_use is
		 * reduced by 16 + 2 - 14 = 4.
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;

			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		case C4IW_DRAIN_OPCODE:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

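/*
 * Poll up to num_entries completions from the CQ into the wc array,
 * retrying CQEs that poll_cq() asks us to skip (-EAGAIN).  Returns the
 * number of completions reaped, or a negative errno on a fatal error.
 */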
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

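/*
 * Tear down a CQ: drop it from the cqid table, wait for the reference
 * count to drain, then free the hardware and host resources via
 * destroy_cq() using the pre-allocated destroy_skb.
 */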
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb);
	chp->destroy_skb = NULL;
	kfree(chp);
	return 0;
}

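/*
 * Verbs CQ create entry point: size the hardware ingress queue, create
 * it via create_cq(), and, for user CQs, hand the queue and doorbell
 * mmap keys back to userspace through udata.
 */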
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err1;
	}

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err2;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err3;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err4;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err5;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err6;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err6:
	kfree(mm2);
err5:
	kfree(mm);
err4:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb);
err2:
	kfree_skb(chp->destroy_skb);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

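/*
 * Arm the CQ for the next (or next solicited) completion event.  With
 * IB_CQ_REPORT_MISSED_EVENTS, also report whether completions are
 * already pending in the CQ.
 */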
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}