drivers/infiniband/hw/qedr/qedr_cm.c
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_roce_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"

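/* Advance the driver-maintained GSI consumer index, wrapping around at
 * the queue depth (max_wr).
 */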
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

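/* Cache the GSI QP and its CQs on the device structure so that the LL2
 * completion callbacks below can locate them.
 */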
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
	dev->gsi_qp = qp;
}

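/* LL2 TX completion callback: frees the DMA-coherent UD header buffer and
 * the packet descriptor, advances the SQ GSI consumer index, and invokes
 * the send CQ's completion handler if one is armed.
 */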
void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
{
	struct qedr_dev *dev = (struct qedr_dev *)_qdev;
	struct qedr_cq *cq = dev->gsi_sqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
		 cq->ibcq.comp_handler ? "Yes" : "No");

	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
			  pkt->header.baddr);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);
	qedr_inc_sw_gsi_cons(&qp->sq);
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

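/* LL2 RX callback: records the completion status, VLAN id, source MAC and
 * payload length for the receive WR at the current GSI consumer index,
 * advances the index, and notifies the receive CQ's completion handler.
 */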
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
		    struct qed_roce_ll2_rx_params *params)
{
	struct qedr_dev *dev = (struct qedr_dev *)_dev;
	struct qedr_cq *cq = dev->gsi_rqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
	ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);

	qedr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

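/* Release the firmware CQ object(s) behind the GSI QP's CQs; GSI
 * completions are generated by the driver, so the firmware CQs are no
 * longer needed once the GSI QP takes over.
 */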
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
				struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qedr_cq *cq;

	cq = get_qedr_cq(attrs->send_cq);
	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	cq = get_qedr_cq(attrs->recv_cq);
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
}

static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
					  struct ib_qp_init_attr *attrs)
{
	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
		return -EINVAL;
	}

	return 0;
}

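/* Create the GSI (QP1) QP: validate the requested capabilities, start the
 * qed LL2 connection that carries GSI traffic, allocate software wr_id
 * tracking arrays, and move the associated CQs under driver ownership.
 */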
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
				 struct ib_qp_init_attr *attrs,
				 struct qedr_qp *qp)
{
	struct qed_roce_ll2_params ll2_params;
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	/* configure and start LL2 */
	memset(&ll2_params, 0, sizeof(ll2_params));
	ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
	ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
	ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
	ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
	ll2_params.cb_cookie = (void *)dev;
	ll2_params.mtu = dev->ndev->mtu;
	ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
	rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQs are handled by the driver so remove them from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return &qp->ibqp;

err:
	kfree(qp->rqe_wr_id);

	rc = dev->ops->roce_ll2_stop(dev->cdev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed to stop ll2 on error cleanup\n");

	return ERR_PTR(-ENOMEM);
}

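/* Destroy the GSI QP by stopping the LL2 connection that carries its
 * traffic.
 */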
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	int rc;

	rc = dev->ops->roce_ll2_stop(dev->cdev);
	if (rc)
		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
	else
		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");

	return rc;
}

#define QEDR_MAX_UD_HEADER_SIZE	(100)
#define QEDR_GSI_QPN		(1)
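/* Build the UD headers for a GSI send in software: Ethernet (optionally
 * 802.1Q tagged), then either a GRH (RoCE v1 / RoCE v2 over IPv6) or an
 * IPv4 header (RoCE v2 over IPv4), followed by BTH, DETH and, for RoCE
 * v2, a UDP header. The RoCE version is derived from the SGID type and
 * contents.
 */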
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;
	bool has_udp = false;
	int i;

	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       rdma_ah_get_port_num(ah_attr),
		       grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       grh->sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is left untouched, hence it is zero */
	}
	return 0;
}

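/* Pack the UD headers into a DMA-coherent buffer and assemble an LL2
 * packet descriptor: the header as one segment plus the caller's SGEs as
 * payload segments. Loopback is selected when the destination MAC equals
 * our own source MAC.
 */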
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
	else
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;

	return 0;
}

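/* Post a GSI send WR: build the UD packet in software and hand it to LL2
 * for transmission. Only a single IB_WR_SEND WR per call is supported;
 * a chained WR is reported via *bad_wr.
 */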
int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_tx_params params;
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to post send WR. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	memset(&params, 0, sizeof(params));

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
	} else {
		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
			/* TX failed while posting the header - release resources */
			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
					  pkt->header.vaddr, pkt->header.baddr);
			kfree(pkt);
		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
			/* Nothing to do here: TX failed while posting a
			 * fragment, so the resources are released in the TX
			 * completion callback.
			 */
		}

		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}

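/* Post GSI receive buffers: each WR's single SGE is handed to LL2 as an
 * RX buffer, and its wr_id is tracked in software so qedr_gsi_poll_cq()
 * can match the completion back to the WR.
 */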
int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_buffer buf;
	unsigned long flags;
	int status = 0;
	int rc;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(buf));

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			goto err;
		}

		buf.baddr = wr->sg_list[0].addr;
		buf.len = wr->sg_list[0].length;

		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
		if (rc) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return -ENOMEM;
}

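/* Poll GSI completions from the software-tracked queues: RQ completions
 * first (between rq.cons and rq.gsi_cons), then SQ completions (between
 * sq.cons and sq.gsi_cons), up to num_entries in total.
 */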
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
		    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;
		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}