drivers/infiniband/hw/bnxt_re/ib_verbs.c
1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
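/* Translate IB verbs access flags into the bnxt_re qplib encoding;
 * __to_ib_access_flags() below performs the reverse mapping.
 */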
64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
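/* Copy an IB SGE list into the qplib SGE format and return the total
 * payload length in bytes.
 */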
106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
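/* Return the netdev backing this IB device, holding a reference for the caller. */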
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modifying the GUID requires modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
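/* Map the netdev's ethtool link speed to an IB speed/width pair. */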
226static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
227{
228 struct ethtool_link_ksettings lksettings;
229 u32 espeed;
230
231 if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
232 memset(&lksettings, 0, sizeof(lksettings));
233 rtnl_lock();
234 netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
235 rtnl_unlock();
236 espeed = lksettings.base.speed;
237 } else {
238 espeed = SPEED_UNKNOWN;
239 }
240 switch (espeed) {
241 case SPEED_1000:
242 *speed = IB_SPEED_SDR;
243 *width = IB_WIDTH_1X;
244 break;
245 case SPEED_10000:
246 *speed = IB_SPEED_QDR;
247 *width = IB_WIDTH_1X;
248 break;
249 case SPEED_20000:
250 *speed = IB_SPEED_DDR;
251 *width = IB_WIDTH_4X;
252 break;
253 case SPEED_25000:
254 *speed = IB_SPEED_EDR;
255 *width = IB_WIDTH_1X;
256 break;
257 case SPEED_40000:
258 *speed = IB_SPEED_QDR;
259 *width = IB_WIDTH_4X;
260 break;
261 case SPEED_50000:
262 break;
263 default:
264 *speed = IB_SPEED_SDR;
265 *width = IB_WIDTH_1X;
266 break;
267 }
268}
269
270/* Port */
271int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
272 struct ib_port_attr *port_attr)
273{
274 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
276
277 memset(port_attr, 0, sizeof(*port_attr));
278
279 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
280 port_attr->state = IB_PORT_ACTIVE;
281 port_attr->phys_state = 5;
282 } else {
283 port_attr->state = IB_PORT_DOWN;
284 port_attr->phys_state = 3;
285 }
286 port_attr->max_mtu = IB_MTU_4096;
287 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
288 port_attr->gid_tbl_len = dev_attr->max_sgid;
289 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
290 IB_PORT_DEVICE_MGMT_SUP |
291 IB_PORT_VENDOR_CLASS_SUP |
292 IB_PORT_IP_BASED_GIDS;
293
294 /* Max MSG size set to 2G for now */
295 port_attr->max_msg_sz = 0x80000000;
296 port_attr->bad_pkey_cntr = 0;
297 port_attr->qkey_viol_cntr = 0;
298 port_attr->pkey_tbl_len = dev_attr->max_pkey;
299 port_attr->lid = 0;
300 port_attr->sm_lid = 0;
301 port_attr->lmc = 0;
302 port_attr->max_vl_num = 4;
303 port_attr->sm_sl = 0;
304 port_attr->subnet_timeout = 0;
305 port_attr->init_type_reply = 0;
306 /* Call the underlying netdev's ethtool hooks to query the speed
307 * settings, but only while the device is registered with the IB stack,
308 * to avoid a race with the NETDEV_UNREG path (rtnl_lock is taken there)
309 */
310 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
311 __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
312 &port_attr->active_width);
313 return 0;
314}
315
316int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
317 int port_modify_mask,
318 struct ib_port_modify *port_modify)
319{
320 switch (port_modify_mask) {
321 case IB_PORT_SHUTDOWN:
322 break;
323 case IB_PORT_INIT_TYPE:
324 break;
325 case IB_PORT_RESET_QKEY_CNTR:
326 break;
327 default:
328 break;
329 }
330 return 0;
331}
332
333int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
334 struct ib_port_immutable *immutable)
335{
336 struct ib_port_attr port_attr;
337
338 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
339 return -EINVAL;
340
341 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
342 immutable->gid_tbl_len = port_attr.gid_tbl_len;
343 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
344 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
345 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
346 return 0;
347}
348
349int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
350 u16 index, u16 *pkey)
351{
352 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
353
354 /* Ignore port_num */
355
356 memset(pkey, 0, sizeof(*pkey));
357 return bnxt_qplib_get_pkey(&rdev->qplib_res,
358 &rdev->qplib_res.pkey_tbl, index, pkey);
359}
360
361int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
362 int index, union ib_gid *gid)
363{
364 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
365 int rc = 0;
366
367 /* Ignore port_num */
368 memset(gid, 0, sizeof(*gid));
369 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
370 &rdev->qplib_res.sgid_tbl, index,
371 (struct bnxt_qplib_gid *)gid);
372 return rc;
373}
374
375int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
376 unsigned int index, void **context)
377{
378 int rc = 0;
379 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
380 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
381 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
382
383 /* Delete the entry from the hardware */
384 ctx = *context;
385 if (!ctx)
386 return -EINVAL;
387
388 if (sgid_tbl && sgid_tbl->active) {
389 if (ctx->idx >= sgid_tbl->max)
390 return -EINVAL;
391 ctx->refcnt--;
392 if (!ctx->refcnt) {
393 rc = bnxt_qplib_del_sgid(sgid_tbl,
394 &sgid_tbl->tbl[ctx->idx],
395 true);
396 if (rc) {
397 dev_err(rdev_to_dev(rdev),
398 "Failed to remove GID: %#x", rc);
399 } else {
400 ctx_tbl = sgid_tbl->ctx;
401 ctx_tbl[ctx->idx] = NULL;
402 kfree(ctx);
403 }
404 }
405 } else {
406 return -EINVAL;
407 }
408 return rc;
409}
410
411int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
412 unsigned int index, const union ib_gid *gid,
413 const struct ib_gid_attr *attr, void **context)
414{
415 int rc;
416 u32 tbl_idx = 0;
417 u16 vlan_id = 0xFFFF;
418 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
419 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
420 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
421
422 if ((attr->ndev) && is_vlan_dev(attr->ndev))
423 vlan_id = vlan_dev_vlan_id(attr->ndev);
424
425 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
426 rdev->qplib_res.netdev->dev_addr,
427 vlan_id, true, &tbl_idx);
428 if (rc == -EALREADY) {
429 ctx_tbl = sgid_tbl->ctx;
430 ctx_tbl[tbl_idx]->refcnt++;
431 *context = ctx_tbl[tbl_idx];
432 return 0;
433 }
434
435 if (rc < 0) {
436 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
437 return rc;
438 }
439
440 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
441 if (!ctx)
442 return -ENOMEM;
443 ctx_tbl = sgid_tbl->ctx;
444 ctx->idx = tbl_idx;
445 ctx->refcnt = 1;
446 ctx_tbl[tbl_idx] = ctx;
447
448 return rc;
449}
450
451enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
452 u8 port_num)
453{
454 return IB_LINK_LAYER_ETHERNET;
455}
456
457#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
458
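/* Pre-build the type-1 memory-window bind WQE used to post fence
 * operations on this PD, and seed the rkey used for the first bind.
 */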
459static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
460{
461 struct bnxt_re_fence_data *fence = &pd->fence;
462 struct ib_mr *ib_mr = &fence->mr->ib_mr;
463 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
464
465 memset(wqe, 0, sizeof(*wqe));
466 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
467 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
468 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
469 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
470 wqe->bind.zero_based = false;
471 wqe->bind.parent_l_key = ib_mr->lkey;
472 wqe->bind.va = (u64)(unsigned long)fence->va;
473 wqe->bind.length = fence->size;
474 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
475 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
476
477 /* Save the initial rkey in fence structure for now;
478 * wqe->bind.r_key will be set at (re)bind time.
479 */
480 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
481}
482
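/* Post the PD's saved bind WQE (with a freshly incremented rkey) on the
 * QP's send queue and ring the doorbell.
 */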
483static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
484{
485 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
486 qplib_qp);
487 struct ib_pd *ib_pd = qp->ib_qp.pd;
488 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
489 struct bnxt_re_fence_data *fence = &pd->fence;
490 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
491 struct bnxt_qplib_swqe wqe;
492 int rc;
493
494 memcpy(&wqe, fence_wqe, sizeof(wqe));
495 wqe.bind.r_key = fence->bind_rkey;
496 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
497
498 dev_dbg(rdev_to_dev(qp->rdev),
499 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
500 wqe.bind.r_key, qp->qplib_qp.id, pd);
501 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
502 if (rc) {
503 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
504 return rc;
505 }
506 bnxt_qplib_post_send_db(&qp->qplib_qp);
507
508 return rc;
509}
510
511static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
512{
513 struct bnxt_re_fence_data *fence = &pd->fence;
514 struct bnxt_re_dev *rdev = pd->rdev;
515 struct device *dev = &rdev->en_dev->pdev->dev;
516 struct bnxt_re_mr *mr = fence->mr;
517
518 if (fence->mw) {
519 bnxt_re_dealloc_mw(fence->mw);
520 fence->mw = NULL;
521 }
522 if (mr) {
523 if (mr->ib_mr.rkey)
524 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
525 true);
526 if (mr->ib_mr.lkey)
527 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
528 kfree(mr);
529 fence->mr = NULL;
530 }
531 if (fence->dma_addr) {
532 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
533 DMA_BIDIRECTIONAL);
534 fence->dma_addr = 0;
535 }
536}
537
538static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
539{
540 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
541 struct bnxt_re_fence_data *fence = &pd->fence;
542 struct bnxt_re_dev *rdev = pd->rdev;
543 struct device *dev = &rdev->en_dev->pdev->dev;
544 struct bnxt_re_mr *mr = NULL;
545 dma_addr_t dma_addr = 0;
546 struct ib_mw *mw;
547 u64 pbl_tbl;
548 int rc;
549
550 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
551 DMA_BIDIRECTIONAL);
552 rc = dma_mapping_error(dev, dma_addr);
553 if (rc) {
554 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
555 rc = -EIO;
556 fence->dma_addr = 0;
557 goto fail;
558 }
559 fence->dma_addr = dma_addr;
560
561 /* Allocate a MR */
562 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
563 if (!mr) {
564 rc = -ENOMEM;
565 goto fail;
566 }
567 fence->mr = mr;
568 mr->rdev = rdev;
569 mr->qplib_mr.pd = &pd->qplib_pd;
570 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
571 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
572 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
573 if (rc) {
574 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
575 goto fail;
576 }
577
578 /* Register MR */
579 mr->ib_mr.lkey = mr->qplib_mr.lkey;
580 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
581 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
582 pbl_tbl = dma_addr;
583 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
584 BNXT_RE_FENCE_PBL_SIZE, false);
585 if (rc) {
586 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
587 goto fail;
588 }
589 mr->ib_mr.rkey = mr->qplib_mr.rkey;
590
591 /* Create a fence MW only for kernel consumers */
592 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
593 if (IS_ERR(mw)) {
594 dev_err(rdev_to_dev(rdev),
595 "Failed to create fence-MW for PD: %p\n", pd);
596 rc = PTR_ERR(mw);
597 goto fail;
598 }
599 fence->mw = mw;
600
601 bnxt_re_create_fence_wqe(pd);
602 return 0;
603
604fail:
605 bnxt_re_destroy_fence_mr(pd);
606 return rc;
607}
608
609/* Protection Domains */
610int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
611{
612 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
613 struct bnxt_re_dev *rdev = pd->rdev;
614 int rc;
615
616 bnxt_re_destroy_fence_mr(pd);
617
618 if (pd->qplib_pd.id) {
619 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
620 &rdev->qplib_res.pd_tbl,
621 &pd->qplib_pd);
622 if (rc)
623 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
624 }
625
626 kfree(pd);
627 return 0;
628}
629
630struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
631 struct ib_ucontext *ucontext,
632 struct ib_udata *udata)
633{
634 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
635 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
636 struct bnxt_re_ucontext,
637 ib_uctx);
638 struct bnxt_re_pd *pd;
639 int rc;
640
641 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
642 if (!pd)
643 return ERR_PTR(-ENOMEM);
644
645 pd->rdev = rdev;
646 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
647 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
648 rc = -ENOMEM;
649 goto fail;
650 }
651
652 if (udata) {
653 struct bnxt_re_pd_resp resp;
654
655 if (!ucntx->dpi.dbr) {
656 /* Allocate the DPI in alloc_pd so that ibv_devinfo and
657 * similar applications do not fail when DPIs
658 * are depleted.
659 */
660 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
661 &ucntx->dpi, ucntx)) {
662 rc = -ENOMEM;
663 goto dbfail;
664 }
665 }
666
667 resp.pdid = pd->qplib_pd.id;
668 /* Still allow mapping this DBR to the new user PD. */
669 resp.dpi = ucntx->dpi.dpi;
670 resp.dbr = (u64)ucntx->dpi.umdbr;
671
672 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
673 if (rc) {
674 dev_err(rdev_to_dev(rdev),
675 "Failed to copy user response\n");
676 goto dbfail;
677 }
678 }
679
680 if (!udata)
681 if (bnxt_re_create_fence_mr(pd))
682 dev_warn(rdev_to_dev(rdev),
683 "Failed to create Fence-MR\n");
684 return &pd->ib_pd;
685dbfail:
686 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
687 &pd->qplib_pd);
688fail:
689 kfree(pd);
690 return ERR_PTR(rc);
691}
692
693/* Address Handles */
694int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
695{
696 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
697 struct bnxt_re_dev *rdev = ah->rdev;
698 int rc;
699
700 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
701 if (rc) {
702 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
703 return rc;
704 }
705 kfree(ah);
706 return 0;
707}
708
709struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
710 struct rdma_ah_attr *ah_attr,
711 struct ib_udata *udata)
712{
713 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
714 struct bnxt_re_dev *rdev = pd->rdev;
715 struct bnxt_re_ah *ah;
716 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
717 int rc;
718 u16 vlan_tag;
719 u8 nw_type;
720
721 struct ib_gid_attr sgid_attr;
722
723 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
724 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
725 return ERR_PTR(-EINVAL);
726 }
727 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
728 if (!ah)
729 return ERR_PTR(-ENOMEM);
730
731 ah->rdev = rdev;
732 ah->qplib_ah.pd = &pd->qplib_pd;
733
734 /* Supply the configuration for the HW */
735 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
736 sizeof(union ib_gid));
737 /*
738 * If RoCE V2 is enabled, the stack will have two entries for
739 * each GID entry. Avoid this duplicate entry in HW by dividing
740 * the GID index by 2 for RoCE V2.
741 */
742 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
743 ah->qplib_ah.host_sgid_index = grh->sgid_index;
744 ah->qplib_ah.traffic_class = grh->traffic_class;
745 ah->qplib_ah.flow_label = grh->flow_label;
746 ah->qplib_ah.hop_limit = grh->hop_limit;
747 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
748 if (ib_pd->uobject &&
749 !rdma_is_multicast_addr((struct in6_addr *)
750 grh->dgid.raw) &&
751 !rdma_link_local_addr((struct in6_addr *)
752 grh->dgid.raw)) {
753 union ib_gid sgid;
754
755 rc = ib_get_cached_gid(&rdev->ibdev, 1,
756 grh->sgid_index, &sgid,
757 &sgid_attr);
758 if (rc) {
759 dev_err(rdev_to_dev(rdev),
760 "Failed to query gid at index %d",
761 grh->sgid_index);
762 goto fail;
763 }
764 if (sgid_attr.ndev) {
765 if (is_vlan_dev(sgid_attr.ndev))
766 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
767 dev_put(sgid_attr.ndev);
768 }
769 /* Get network header type for this GID */
770 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
771 switch (nw_type) {
772 case RDMA_NETWORK_IPV4:
773 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
774 break;
775 case RDMA_NETWORK_IPV6:
776 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
777 break;
778 default:
779 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
780 break;
781 }
782 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
783 ah_attr->roce.dmac, &vlan_tag,
784 &sgid_attr.ndev->ifindex,
785 NULL);
786 if (rc) {
787 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
788 goto fail;
789 }
790 }
791
792 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
793 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
794 if (rc) {
795 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
796 goto fail;
797 }
798
799 /* Write AVID to shared page. */
800 if (ib_pd->uobject) {
801 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
802 struct bnxt_re_ucontext *uctx;
803 unsigned long flag;
804 u32 *wrptr;
805
806 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
807 spin_lock_irqsave(&uctx->sh_lock, flag);
808 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
809 *wrptr = ah->qplib_ah.id;
810 wmb(); /* make sure cache is updated. */
811 spin_unlock_irqrestore(&uctx->sh_lock, flag);
812 }
813
814 return &ah->ib_ah;
815
816fail:
817 kfree(ah);
818 return ERR_PTR(rc);
819}
820
821 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
822{
823 return 0;
824}
825
826 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
827{
828 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
829
830 ah_attr->type = ib_ah->type;
831 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
832 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
833 rdma_ah_set_grh(ah_attr, NULL, 0,
834 ah->qplib_ah.host_sgid_index,
835 0, ah->qplib_ah.traffic_class);
836 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
837 rdma_ah_set_port_num(ah_attr, 1);
838 rdma_ah_set_static_rate(ah_attr, 0);
839 return 0;
840}
841
842/* Queue Pairs */
843int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
844{
845 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
846 struct bnxt_re_dev *rdev = qp->rdev;
847 int rc;
848
849 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
850 if (rc) {
851 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
852 return rc;
853 }
854 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
855 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
856 &rdev->sqp_ah->qplib_ah);
857 if (rc) {
858 dev_err(rdev_to_dev(rdev),
859 "Failed to destroy HW AH for shadow QP");
860 return rc;
861 }
862
863 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
864 &rdev->qp1_sqp->qplib_qp);
865 if (rc) {
866 dev_err(rdev_to_dev(rdev),
867 "Failed to destroy Shadow QP");
868 return rc;
869 }
870 mutex_lock(&rdev->qp_lock);
871 list_del(&rdev->qp1_sqp->list);
872 atomic_dec(&rdev->qp_count);
873 mutex_unlock(&rdev->qp_lock);
874
875 kfree(rdev->sqp_ah);
876 kfree(rdev->qp1_sqp);
877 }
878
879 if (!IS_ERR_OR_NULL(qp->rumem))
880 ib_umem_release(qp->rumem);
881 if (!IS_ERR_OR_NULL(qp->sumem))
882 ib_umem_release(qp->sumem);
883
884 mutex_lock(&rdev->qp_lock);
885 list_del(&qp->list);
886 atomic_dec(&rdev->qp_count);
887 mutex_unlock(&rdev->qp_lock);
888 kfree(qp);
889 return 0;
890}
891
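/* Map IB QP types onto the firmware CMDQ QP type codes; unsupported
 * types are flagged by returning IB_QPT_MAX.
 */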
892static u8 __from_ib_qp_type(enum ib_qp_type type)
893{
894 switch (type) {
895 case IB_QPT_GSI:
896 return CMDQ_CREATE_QP1_TYPE_GSI;
897 case IB_QPT_RC:
898 return CMDQ_CREATE_QP_TYPE_RC;
899 case IB_QPT_UD:
900 return CMDQ_CREATE_QP_TYPE_UD;
901 default:
902 return IB_QPT_MAX;
903 }
904}
905
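/* For a userspace QP, pin and map the SQ buffer (plus the PSN search
 * area for RC QPs) and the RQ buffer supplied via udata, and point the
 * QP at the context's DPI.
 */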
906static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
907 struct bnxt_re_qp *qp, struct ib_udata *udata)
908{
909 struct bnxt_re_qp_req ureq;
910 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
911 struct ib_umem *umem;
912 int bytes = 0;
913 struct ib_ucontext *context = pd->ib_pd.uobject->context;
914 struct bnxt_re_ucontext *cntx = container_of(context,
915 struct bnxt_re_ucontext,
916 ib_uctx);
917 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
918 return -EFAULT;
919
920 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
921 /* Consider mapping PSN search memory only for RC QPs. */
922 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
923 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
924 bytes = PAGE_ALIGN(bytes);
925 umem = ib_umem_get(context, ureq.qpsva, bytes,
926 IB_ACCESS_LOCAL_WRITE, 1);
927 if (IS_ERR(umem))
928 return PTR_ERR(umem);
929
930 qp->sumem = umem;
931 qplib_qp->sq.sglist = umem->sg_head.sgl;
932 qplib_qp->sq.nmap = umem->nmap;
933 qplib_qp->qp_handle = ureq.qp_handle;
934
935 if (!qp->qplib_qp.srq) {
936 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
937 bytes = PAGE_ALIGN(bytes);
938 umem = ib_umem_get(context, ureq.qprva, bytes,
939 IB_ACCESS_LOCAL_WRITE, 1);
940 if (IS_ERR(umem))
941 goto rqfail;
942 qp->rumem = umem;
943 qplib_qp->rq.sglist = umem->sg_head.sgl;
944 qplib_qp->rq.nmap = umem->nmap;
945 }
946
947 qplib_qp->dpi = &cntx->dpi;
948 return 0;
949rqfail:
950 ib_umem_release(qp->sumem);
951 qp->sumem = NULL;
952 qplib_qp->sq.sglist = NULL;
953 qplib_qp->sq.nmap = 0;
954
955 return PTR_ERR(umem);
956}
957
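/* Create the address handle used by the shadow QP that relays QP1 (GSI)
 * traffic; the DGID mirrors the local SGID and the DMAC mirrors the
 * netdev's MAC address.
 */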
958static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
959 (struct bnxt_re_pd *pd,
960 struct bnxt_qplib_res *qp1_res,
961 struct bnxt_qplib_qp *qp1_qp)
962{
963 struct bnxt_re_dev *rdev = pd->rdev;
964 struct bnxt_re_ah *ah;
965 union ib_gid sgid;
966 int rc;
967
968 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
969 if (!ah)
970 return NULL;
971
972 memset(ah, 0, sizeof(*ah));
973 ah->rdev = rdev;
974 ah->qplib_ah.pd = &pd->qplib_pd;
975
976 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
977 if (rc)
978 goto fail;
979
980 /* supply the dgid data same as sgid */
981 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
982 sizeof(union ib_gid));
983 ah->qplib_ah.sgid_index = 0;
984
985 ah->qplib_ah.traffic_class = 0;
986 ah->qplib_ah.flow_label = 0;
987 ah->qplib_ah.hop_limit = 1;
988 ah->qplib_ah.sl = 0;
989 /* Have DMAC same as SMAC */
990 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
991
992 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
993 if (rc) {
994 dev_err(rdev_to_dev(rdev),
995 "Failed to allocate HW AH for Shadow QP");
996 goto fail;
997 }
998
999 return ah;
1000
1001fail:
1002 kfree(ah);
1003 return NULL;
1004}
1005
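/* Create a UD shadow QP that shares QP1's completion queues; the driver
 * uses it to handle QP1 (GSI) traffic.
 */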
1006static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1007 (struct bnxt_re_pd *pd,
1008 struct bnxt_qplib_res *qp1_res,
1009 struct bnxt_qplib_qp *qp1_qp)
1010{
1011 struct bnxt_re_dev *rdev = pd->rdev;
1012 struct bnxt_re_qp *qp;
1013 int rc;
1014
1015 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1016 if (!qp)
1017 return NULL;
1018
1019 memset(qp, 0, sizeof(*qp));
1020 qp->rdev = rdev;
1021
1022 /* Initialize the shadow QP structure from the QP1 values */
1023 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1024
1025 qp->qplib_qp.pd = &pd->qplib_pd;
1026 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1027 qp->qplib_qp.type = IB_QPT_UD;
1028
1029 qp->qplib_qp.max_inline_data = 0;
1030 qp->qplib_qp.sig_type = true;
1031
1032 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1033 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1034 qp->qplib_qp.sq.max_sge = 2;
1035 /* Q full delta can be 1 since it is an internal QP */
1036 qp->qplib_qp.sq.q_full_delta = 1;
1037
1038 qp->qplib_qp.scq = qp1_qp->scq;
1039 qp->qplib_qp.rcq = qp1_qp->rcq;
1040
1041 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1042 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1043 /* Q full delta can be 1 since it is an internal QP */
1044 qp->qplib_qp.rq.q_full_delta = 1;
1045
1046 qp->qplib_qp.mtu = qp1_qp->mtu;
1047
1048 qp->qplib_qp.sq_hdr_buf_size = 0;
1049 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1050 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1051
1052 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1053 if (rc)
1054 goto fail;
1055
1056 rdev->sqp_id = qp->qplib_qp.id;
1057
1058 spin_lock_init(&qp->sq_lock);
1059 INIT_LIST_HEAD(&qp->list);
1060 mutex_lock(&rdev->qp_lock);
1061 list_add_tail(&qp->list, &rdev->qp_list);
1062 atomic_inc(&rdev->qp_count);
1063 mutex_unlock(&rdev->qp_lock);
1064 return qp;
1065fail:
1066 kfree(qp);
1067 return NULL;
1068}
1069
1070struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1071 struct ib_qp_init_attr *qp_init_attr,
1072 struct ib_udata *udata)
1073{
1074 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1075 struct bnxt_re_dev *rdev = pd->rdev;
1076 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1077 struct bnxt_re_qp *qp;
1078 struct bnxt_re_cq *cq;
1079 int rc, entries;
1080
1081 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1082 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1083 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1084 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1085 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1086 return ERR_PTR(-EINVAL);
1087
1088 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1089 if (!qp)
1090 return ERR_PTR(-ENOMEM);
1091
1092 qp->rdev = rdev;
1093 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1094 qp->qplib_qp.pd = &pd->qplib_pd;
1095 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1096 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1097 if (qp->qplib_qp.type == IB_QPT_MAX) {
1098 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1099 qp->qplib_qp.type);
1100 rc = -EINVAL;
1101 goto fail;
1102 }
1103 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1104 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1105 IB_SIGNAL_ALL_WR) ? true : false);
1106
1107 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1108 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1109 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1110
1111 if (qp_init_attr->send_cq) {
1112 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1113 ib_cq);
1114 if (!cq) {
1115 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1116 rc = -EINVAL;
1117 goto fail;
1118 }
1119 qp->qplib_qp.scq = &cq->qplib_cq;
1120 }
1121
1122 if (qp_init_attr->recv_cq) {
1123 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1124 ib_cq);
1125 if (!cq) {
1126 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1127 rc = -EINVAL;
1128 goto fail;
1129 }
1130 qp->qplib_qp.rcq = &cq->qplib_cq;
1131 }
1132
1133 if (qp_init_attr->srq) {
1134 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1135 rc = -ENOTSUPP;
1136 goto fail;
1137 } else {
1138 /* Allocate 1 more than what's provided so posting max doesn't
1139 * mean empty
1140 */
1141 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1142 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1143 dev_attr->max_qp_wqes + 1);
1144
1145 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1146 qp_init_attr->cap.max_recv_wr;
1147
1148 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1149 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1150 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1151 }
1152
1153 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1154
1155 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1156 /* Allocate 1 more than what's provided */
1157 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1158 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1159 dev_attr->max_qp_wqes + 1);
1160 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1161 qp_init_attr->cap.max_send_wr;
1162 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1163 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1164 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1165 qp->qplib_qp.sq.max_sge++;
1166 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1167 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1168
1169 qp->qplib_qp.rq_hdr_buf_size =
1170 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1171
1172 qp->qplib_qp.sq_hdr_buf_size =
1173 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1174 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1175 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1176 if (rc) {
1177 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1178 goto fail;
1179 }
1180 /* Create a shadow QP to handle the QP1 traffic */
1181 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1182 &qp->qplib_qp);
1183 if (!rdev->qp1_sqp) {
1184 rc = -EINVAL;
1185 dev_err(rdev_to_dev(rdev),
1186 "Failed to create Shadow QP for QP1");
1187 goto qp_destroy;
1188 }
1189 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1190 &qp->qplib_qp);
1191 if (!rdev->sqp_ah) {
1192 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1193 &rdev->qp1_sqp->qplib_qp);
1194 rc = -EINVAL;
1195 dev_err(rdev_to_dev(rdev),
1196 "Failed to create AH entry for ShadowQP");
1197 goto qp_destroy;
1198 }
1199
1200 } else {
1201 /* Allocate 128 + 1 more than what's provided */
1202 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1203 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1204 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1205 dev_attr->max_qp_wqes +
1206 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1207 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1208
1209 /*
1210 * Reserve one slot for the Phantom WQE. The application can
1211 * post one extra entry in this case, but allow it to avoid an
1212 * unexpected Queue Full condition.
1213 */
1214
1215 qp->qplib_qp.sq.q_full_delta -= 1;
1216
1217 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1218 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1219 if (udata) {
1220 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1221 if (rc)
1222 goto fail;
1223 } else {
1224 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1225 }
1226
1227 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1228 if (rc) {
1229 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1230 goto fail;
1231 }
1232 }
1233
1234 qp->ib_qp.qp_num = qp->qplib_qp.id;
1235 spin_lock_init(&qp->sq_lock);
1236 spin_lock_init(&qp->rq_lock);
1237
1238 if (udata) {
1239 struct bnxt_re_qp_resp resp;
1240
1241 resp.qpid = qp->ib_qp.qp_num;
1242 resp.rsvd = 0;
1243 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1244 if (rc) {
1245 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1246 goto qp_destroy;
1247 }
1248 }
1249 INIT_LIST_HEAD(&qp->list);
1250 mutex_lock(&rdev->qp_lock);
1251 list_add_tail(&qp->list, &rdev->qp_list);
1252 atomic_inc(&rdev->qp_count);
1253 mutex_unlock(&rdev->qp_lock);
1254
1255 return &qp->ib_qp;
1256qp_destroy:
1257 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1258fail:
1259 kfree(qp);
1260 return ERR_PTR(rc);
1261}
1262
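/* Helpers translating QP state and path MTU values between the IB verbs
 * enums and the firmware CMDQ/CREQ encodings.
 */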
1263static u8 __from_ib_qp_state(enum ib_qp_state state)
1264{
1265 switch (state) {
1266 case IB_QPS_RESET:
1267 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1268 case IB_QPS_INIT:
1269 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1270 case IB_QPS_RTR:
1271 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1272 case IB_QPS_RTS:
1273 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1274 case IB_QPS_SQD:
1275 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1276 case IB_QPS_SQE:
1277 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1278 case IB_QPS_ERR:
1279 default:
1280 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1281 }
1282}
1283
1284static enum ib_qp_state __to_ib_qp_state(u8 state)
1285{
1286 switch (state) {
1287 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1288 return IB_QPS_RESET;
1289 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1290 return IB_QPS_INIT;
1291 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1292 return IB_QPS_RTR;
1293 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1294 return IB_QPS_RTS;
1295 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1296 return IB_QPS_SQD;
1297 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1298 return IB_QPS_SQE;
1299 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1300 default:
1301 return IB_QPS_ERR;
1302 }
1303}
1304
1305static u32 __from_ib_mtu(enum ib_mtu mtu)
1306{
1307 switch (mtu) {
1308 case IB_MTU_256:
1309 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1310 case IB_MTU_512:
1311 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1312 case IB_MTU_1024:
1313 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1314 case IB_MTU_2048:
1315 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1316 case IB_MTU_4096:
1317 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1318 default:
1319 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1320 }
1321}
1322
1323static enum ib_mtu __to_ib_mtu(u32 mtu)
1324{
1325 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1326 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1327 return IB_MTU_256;
1328 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1329 return IB_MTU_512;
1330 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1331 return IB_MTU_1024;
1332 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1333 return IB_MTU_2048;
1334 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1335 return IB_MTU_4096;
1336 default:
1337 return IB_MTU_2048;
1338 }
1339}
1340
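/* Propagate QP1 attribute changes (state, pkey index, qkey and SQ PSN)
 * to the shadow QP so the two stay in sync.
 */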
1341static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1342 struct bnxt_re_qp *qp1_qp,
1343 int qp_attr_mask)
1344{
1345 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1346 int rc = 0;
1347
1348 if (qp_attr_mask & IB_QP_STATE) {
1349 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1350 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1351 }
1352 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1353 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1354 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1355 }
1356
1357 if (qp_attr_mask & IB_QP_QKEY) {
1358 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1359 /* Using a Random QKEY */
1360 qp->qplib_qp.qkey = 0x81818181;
1361 }
1362 if (qp_attr_mask & IB_QP_SQ_PSN) {
1363 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1364 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1365 }
1366
1367 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1368 if (rc)
1369 dev_err(rdev_to_dev(rdev),
1370 "Failed to modify Shadow QP for QP1");
1371 return rc;
1372}
1373
1374int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1375 int qp_attr_mask, struct ib_udata *udata)
1376{
1377 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1378 struct bnxt_re_dev *rdev = qp->rdev;
1379 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1380 enum ib_qp_state curr_qp_state, new_qp_state;
1381 int rc, entries;
1382 int status;
1383 union ib_gid sgid;
1384 struct ib_gid_attr sgid_attr;
1385 u8 nw_type;
1386
1387 qp->qplib_qp.modify_flags = 0;
1388 if (qp_attr_mask & IB_QP_STATE) {
1389 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1390 new_qp_state = qp_attr->qp_state;
1391 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1392 ib_qp->qp_type, qp_attr_mask,
1393 IB_LINK_LAYER_ETHERNET)) {
1394 dev_err(rdev_to_dev(rdev),
1395 "Invalid attribute mask: %#x specified ",
1396 qp_attr_mask);
1397 dev_err(rdev_to_dev(rdev),
1398 "for qpn: %#x type: %#x",
1399 ib_qp->qp_num, ib_qp->qp_type);
1400 dev_err(rdev_to_dev(rdev),
1401 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1402 curr_qp_state, new_qp_state);
1403 return -EINVAL;
1404 }
1405 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1406 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1407 }
1408 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1409 qp->qplib_qp.modify_flags |=
1410 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1411 qp->qplib_qp.en_sqd_async_notify = true;
1412 }
1413 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1414 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1415 qp->qplib_qp.access =
1416 __from_ib_access_flags(qp_attr->qp_access_flags);
1417 /* LOCAL_WRITE access must be set to allow RC receive */
1418 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1419 }
1420 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1421 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1422 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1423 }
1424 if (qp_attr_mask & IB_QP_QKEY) {
1425 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1426 qp->qplib_qp.qkey = qp_attr->qkey;
1427 }
1428 if (qp_attr_mask & IB_QP_AV) {
1429 const struct ib_global_route *grh =
1430 rdma_ah_read_grh(&qp_attr->ah_attr);
1431
1432 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1433 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1434 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1435 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1436 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1437 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1438 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1439 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1440 sizeof(qp->qplib_qp.ah.dgid.data));
1441 qp->qplib_qp.ah.flow_label = grh->flow_label;
1442 /* If RoCE V2 is enabled, the stack will have two entries for
1443 * each GID entry. Avoid this duplicate entry in HW by dividing
1444 * the GID index by 2 for RoCE V2.
1445 */
1446 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1447 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1448 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1449 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1450 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1451 ether_addr_copy(qp->qplib_qp.ah.dmac,
1452 qp_attr->ah_attr.roce.dmac);
1453
1454 status = ib_get_cached_gid(&rdev->ibdev, 1,
1455 grh->sgid_index,
1456 &sgid, &sgid_attr);
1457 if (!status && sgid_attr.ndev) {
1458 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1459 ETH_ALEN);
1460 dev_put(sgid_attr.ndev);
1461 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1462 &sgid);
1463 switch (nw_type) {
1464 case RDMA_NETWORK_IPV4:
1465 qp->qplib_qp.nw_type =
1466 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1467 break;
1468 case RDMA_NETWORK_IPV6:
1469 qp->qplib_qp.nw_type =
1470 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1471 break;
1472 default:
1473 qp->qplib_qp.nw_type =
1474 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1475 break;
1476 }
1477 }
1478 }
1479
1480 if (qp_attr_mask & IB_QP_PATH_MTU) {
1481 qp->qplib_qp.modify_flags |=
1482 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1483 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1484 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1485 qp->qplib_qp.modify_flags |=
1486 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1487 qp->qplib_qp.path_mtu =
1488 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1489 }
1490
1491 if (qp_attr_mask & IB_QP_TIMEOUT) {
1492 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1493 qp->qplib_qp.timeout = qp_attr->timeout;
1494 }
1495 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1496 qp->qplib_qp.modify_flags |=
1497 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1498 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1499 }
1500 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1501 qp->qplib_qp.modify_flags |=
1502 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1503 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1504 }
1505 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1506 qp->qplib_qp.modify_flags |=
1507 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1508 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1509 }
1510 if (qp_attr_mask & IB_QP_RQ_PSN) {
1511 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1512 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1513 }
1514 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1515 qp->qplib_qp.modify_flags |=
1516 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1517 /* Cap the max_rd_atomic to device max */
1518 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1519 dev_attr->max_qp_rd_atom);
1520 }
1521 if (qp_attr_mask & IB_QP_SQ_PSN) {
1522 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1523 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1524 }
1525 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1526 if (qp_attr->max_dest_rd_atomic >
1527 dev_attr->max_qp_init_rd_atom) {
1528 dev_err(rdev_to_dev(rdev),
1529 "max_dest_rd_atomic requested%d is > dev_max%d",
1530 qp_attr->max_dest_rd_atomic,
1531 dev_attr->max_qp_init_rd_atom);
1532 return -EINVAL;
1533 }
1534
1535 qp->qplib_qp.modify_flags |=
1536 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1537 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1538 }
1539 if (qp_attr_mask & IB_QP_CAP) {
1540 qp->qplib_qp.modify_flags |=
1541 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1542 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1543 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1544 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1545 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1546 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1547 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1548 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1549 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1550 (qp_attr->cap.max_inline_data >=
1551 dev_attr->max_inline_data)) {
1552 dev_err(rdev_to_dev(rdev),
1553 "Create QP failed - max exceeded");
1554 return -EINVAL;
1555 }
1556 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1557 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1558 dev_attr->max_qp_wqes + 1);
1559 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1560 qp_attr->cap.max_send_wr;
1561 /*
1562 * Reserve one slot for the Phantom WQE. Some applications can
1563 * post one extra entry in this case. Allow it to avoid an
1564 * unexpected Queue Full condition.
1565 */
1566 qp->qplib_qp.sq.q_full_delta -= 1;
1567 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1568 if (qp->qplib_qp.rq.max_wqe) {
1569 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1570 qp->qplib_qp.rq.max_wqe =
1571 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1572 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1573 qp_attr->cap.max_recv_wr;
1574 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1575 } else {
1576 /* SRQ was used prior, just ignore the RQ caps */
1577 }
1578 }
1579 if (qp_attr_mask & IB_QP_DEST_QPN) {
1580 qp->qplib_qp.modify_flags |=
1581 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1582 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1583 }
1584 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1585 if (rc) {
1586 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1587 return rc;
1588 }
1589 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1590 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1591 return rc;
1592}
1593
1594int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1595 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1596{
1597 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1598 struct bnxt_re_dev *rdev = qp->rdev;
1599 struct bnxt_qplib_qp qplib_qp;
1600 int rc;
1601
1602 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1603 qplib_qp.id = qp->qplib_qp.id;
1604 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1605
1606 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1607 if (rc) {
1608 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1609 return rc;
1610 }
1611 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1612 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1613 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1614 qp_attr->pkey_index = qplib_qp.pkey_index;
1615 qp_attr->qkey = qplib_qp.qkey;
1616 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1617 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1618 qplib_qp.ah.host_sgid_index,
1619 qplib_qp.ah.hop_limit,
1620 qplib_qp.ah.traffic_class);
1621 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1622 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
1623 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
1624 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1625 qp_attr->timeout = qplib_qp.timeout;
1626 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1627 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1628 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1629 qp_attr->rq_psn = qplib_qp.rq.psn;
1630 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1631 qp_attr->sq_psn = qplib_qp.sq.psn;
1632 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1633 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1634 IB_SIGNAL_REQ_WR;
1635 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1636
1637 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1638 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1639 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1640 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1641 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1642 qp_init_attr->cap = qp_attr->cap;
1643
1644 return 0;
1645}
1646
1647/* Routine for sending QP1 packets for RoCE V1 and V2
1648 */
1649static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1650 struct ib_send_wr *wr,
1651 struct bnxt_qplib_swqe *wqe,
1652 int payload_size)
1653{
1654 struct ib_device *ibdev = &qp->rdev->ibdev;
1655 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1656 ib_ah);
1657 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1658 struct bnxt_qplib_sge sge;
1659 union ib_gid sgid;
1660 u8 nw_type;
1661 u16 ether_type;
1662 struct ib_gid_attr sgid_attr;
1663 union ib_gid dgid;
1664 bool is_eth = false;
1665 bool is_vlan = false;
1666 bool is_grh = false;
1667 bool is_udp = false;
1668 u8 ip_version = 0;
1669 u16 vlan_id = 0xFFFF;
1670 void *buf;
1671 int i, rc = 0, size;
1672
1673 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1674
1675 rc = ib_get_cached_gid(ibdev, 1,
1676 qplib_ah->host_sgid_index, &sgid,
1677 &sgid_attr);
1678 if (rc) {
1679 dev_err(rdev_to_dev(qp->rdev),
1680 "Failed to query gid at index %d",
1681 qplib_ah->host_sgid_index);
1682 return rc;
1683 }
1684 if (sgid_attr.ndev) {
1685 if (is_vlan_dev(sgid_attr.ndev))
1686 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1687 dev_put(sgid_attr.ndev);
1688 }
1689 /* Get network header type for this GID */
1690 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1691 switch (nw_type) {
1692 case RDMA_NETWORK_IPV4:
1693 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1694 break;
1695 case RDMA_NETWORK_IPV6:
1696 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1697 break;
1698 default:
1699 nw_type = BNXT_RE_ROCE_V1_PACKET;
1700 break;
1701 }
1702 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1703 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1704 if (is_udp) {
1705 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1706 ip_version = 4;
1707 ether_type = ETH_P_IP;
1708 } else {
1709 ip_version = 6;
1710 ether_type = ETH_P_IPV6;
1711 }
1712 is_grh = false;
1713 } else {
1714 ether_type = ETH_P_IBOE;
1715 is_grh = true;
1716 }
1717
1718 is_eth = true;
1719 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1720
1721 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1722 ip_version, is_udp, 0, &qp->qp1_hdr);
1723
1724 /* ETH */
1725 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1726 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1727
1728 /* For vlan, check the sgid for vlan existence */
1729
1730 if (!is_vlan) {
1731 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1732 } else {
1733 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1734 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1735 }
1736
1737 if (is_grh || (ip_version == 6)) {
1738 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1739 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1740 sizeof(sgid));
1741 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1742 }
1743
1744 if (ip_version == 4) {
1745 qp->qp1_hdr.ip4.tos = 0;
1746 qp->qp1_hdr.ip4.id = 0;
1747 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1748 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1749
1750 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1751 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1752 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1753 }
1754
1755 if (is_udp) {
1756 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1757 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1758 qp->qp1_hdr.udp.csum = 0;
1759 }
1760
1761 /* BTH */
1762 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1763 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1764 qp->qp1_hdr.immediate_present = 1;
1765 } else {
1766 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1767 }
1768 if (wr->send_flags & IB_SEND_SOLICITED)
1769 qp->qp1_hdr.bth.solicited_event = 1;
1770 /* pad_count */
1771 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1772
1773 /* P_key for QP1 is for all members */
1774 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1775 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1776 qp->qp1_hdr.bth.ack_req = 0;
1777 qp->send_psn++;
1778 qp->send_psn &= BTH_PSN_MASK;
1779 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1780 /* DETH */
1781 /* Use the privileged Q_Key for QP1 */
1782 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1783 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1784
1785 /* Pack the QP1 to the transmit buffer */
1786 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1787 if (buf) {
1788 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1789 for (i = wqe->num_sge; i; i--) {
1790 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1791 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1792 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1793 }
1794
1795 /*
1796 * The max header buf size for IPv6 RoCE v2 is 86 bytes,
1797 * which is the same as the QP1 SQ header buffer.
1798 * The header buf size for IPv4 RoCE v2 can be 66 bytes:
1799 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1800 * Subtract 20 bytes from the QP1 SQ header buf size.
1801 */
1802 if (is_udp && ip_version == 4)
1803 sge.size -= 20;
1804 /*
1805 * The max header buf size for RoCE v1 is 78 bytes:
1806 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1807 * Subtract 8 bytes from the QP1 SQ header buf size.
1808 */
1809 if (!is_udp)
1810 sge.size -= 8;
1811
1812 /* Subtract 4 bytes for non-VLAN packets */
1813 if (!is_vlan)
1814 sge.size -= 4;
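/*
 * Net effect: the 86-byte QP1 SQ header buffer is trimmed to 66
 * bytes for RoCE v2/IPv4 and 78 bytes for RoCE v1, and a further
 * 4 bytes are dropped when the packet carries no VLAN tag.
 */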
1815
1816 wqe->sg_list[0].addr = sge.addr;
1817 wqe->sg_list[0].lkey = sge.lkey;
1818 wqe->sg_list[0].size = sge.size;
1819 wqe->num_sge++;
1820
1821 } else {
1822 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1823 rc = -ENOMEM;
1824 }
1825 return rc;
1826}
1827
1828/* The MAD layer provides a recv SGE only as large as ib_grh plus
1829 * the MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
1830 * nor RoCE iCRC. The Cu+ solution must therefore provide a buffer
1831 * for the entire receive packet (334 bytes with no VLAN) and then
1832 * copy the GRH and the MAD datagram out to the provided SGE.
1833 */
1834static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1835 struct ib_recv_wr *wr,
1836 struct bnxt_qplib_swqe *wqe,
1837 int payload_size)
1838{
1839 struct bnxt_qplib_sge ref, sge;
1840 u32 rq_prod_index;
1841 struct bnxt_re_sqp_entries *sqp_entry;
1842
1843 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1844
1845 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1846 return -ENOMEM;
1847
1848 /* Create 1 SGE to receive the entire
1849 * ethernet packet
1850 */
1851 /* Save the reference from ULP */
1852 ref.addr = wqe->sg_list[0].addr;
1853 ref.lkey = wqe->sg_list[0].lkey;
1854 ref.size = wqe->sg_list[0].size;
1855
1856 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1857
1858 /* SGE 1 */
1859 wqe->sg_list[0].addr = sge.addr;
1860 wqe->sg_list[0].lkey = sge.lkey;
1861 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1862 sge.size -= wqe->sg_list[0].size;
1863
1864 sqp_entry->sge.addr = ref.addr;
1865 sqp_entry->sge.lkey = ref.lkey;
1866 sqp_entry->sge.size = ref.size;
1867 /* Store the wrid for reporting completion */
1868 sqp_entry->wrid = wqe->wr_id;
1869 /* change the wqe->wrid to table index */
1870 wqe->wr_id = rq_prod_index;
1871 return 0;
1872}
1873
1874static int is_ud_qp(struct bnxt_re_qp *qp)
1875{
1876 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1877}
1878
1879static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1880 struct ib_send_wr *wr,
1881 struct bnxt_qplib_swqe *wqe)
1882{
1883 struct bnxt_re_ah *ah = NULL;
1884
1885 if (is_ud_qp(qp)) {
1886 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1887 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1888 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1889 wqe->send.avid = ah->qplib_ah.id;
1890 }
1891 switch (wr->opcode) {
1892 case IB_WR_SEND:
1893 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1894 break;
1895 case IB_WR_SEND_WITH_IMM:
1896 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1897 wqe->send.imm_data = wr->ex.imm_data;
1898 break;
1899 case IB_WR_SEND_WITH_INV:
1900 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1901 wqe->send.inv_key = wr->ex.invalidate_rkey;
1902 break;
1903 default:
1904 return -EINVAL;
1905 }
1906 if (wr->send_flags & IB_SEND_SIGNALED)
1907 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1908 if (wr->send_flags & IB_SEND_FENCE)
1909 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1910 if (wr->send_flags & IB_SEND_SOLICITED)
1911 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1912 if (wr->send_flags & IB_SEND_INLINE)
1913 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1914
1915 return 0;
1916}
1917
1918static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1919 struct bnxt_qplib_swqe *wqe)
1920{
1921 switch (wr->opcode) {
1922 case IB_WR_RDMA_WRITE:
1923 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1924 break;
1925 case IB_WR_RDMA_WRITE_WITH_IMM:
1926 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1927 wqe->rdma.imm_data = wr->ex.imm_data;
1928 break;
1929 case IB_WR_RDMA_READ:
1930 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1931 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1932 break;
1933 default:
1934 return -EINVAL;
1935 }
1936 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1937 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1938 if (wr->send_flags & IB_SEND_SIGNALED)
1939 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1940 if (wr->send_flags & IB_SEND_FENCE)
1941 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1942 if (wr->send_flags & IB_SEND_SOLICITED)
1943 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1944 if (wr->send_flags & IB_SEND_INLINE)
1945 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1946
1947 return 0;
1948}
1949
1950static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1951 struct bnxt_qplib_swqe *wqe)
1952{
1953 switch (wr->opcode) {
1954 case IB_WR_ATOMIC_CMP_AND_SWP:
1955 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1956 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1957 break;
1958 case IB_WR_ATOMIC_FETCH_AND_ADD:
1959 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1960 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1961 break;
1962 default:
1963 return -EINVAL;
1964 }
1965 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1966 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1967 if (wr->send_flags & IB_SEND_SIGNALED)
1968 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1969 if (wr->send_flags & IB_SEND_FENCE)
1970 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1971 if (wr->send_flags & IB_SEND_SOLICITED)
1972 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1973 return 0;
1974}
1975
1976static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1977 struct bnxt_qplib_swqe *wqe)
1978{
1979 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1980 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1981
1982 if (wr->send_flags & IB_SEND_SIGNALED)
1983 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1984 if (wr->send_flags & IB_SEND_FENCE)
1985 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1986 if (wr->send_flags & IB_SEND_SOLICITED)
1987 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1988
1989 return 0;
1990}
1991
1992static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1993 struct bnxt_qplib_swqe *wqe)
1994{
1995 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1996 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1997 int access = wr->access;
1998
1999 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2000 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2001 wqe->frmr.page_list = mr->pages;
2002 wqe->frmr.page_list_len = mr->npages;
2003 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2004 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2005
2006 if (wr->wr.send_flags & IB_SEND_FENCE)
2007 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2008 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2009 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2010
2011 if (access & IB_ACCESS_LOCAL_WRITE)
2012 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2013 if (access & IB_ACCESS_REMOTE_READ)
2014 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2015 if (access & IB_ACCESS_REMOTE_WRITE)
2016 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2017 if (access & IB_ACCESS_REMOTE_ATOMIC)
2018 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2019 if (access & IB_ACCESS_MW_BIND)
2020 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2021
2022 wqe->frmr.l_key = wr->key;
2023 wqe->frmr.length = wr->mr->length;
2024 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2025 wqe->frmr.va = wr->mr->iova;
2026 return 0;
2027}
2028
2029static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2030 struct ib_send_wr *wr,
2031 struct bnxt_qplib_swqe *wqe)
2032{
2033 /* Copy the inline data to the data field */
2034 u8 *in_data;
2035 u32 i, sge_len;
2036 void *sge_addr;
2037
2038 in_data = wqe->inline_data;
2039 for (i = 0; i < wr->num_sge; i++) {
2040 sge_addr = (void *)(unsigned long)
2041 wr->sg_list[i].addr;
2042 sge_len = wr->sg_list[i].length;
2043
2044 if ((sge_len + wqe->inline_len) >
2045 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2046 dev_err(rdev_to_dev(rdev),
2047 "Inline data size requested > supported value");
2048 return -EINVAL;
2049 }
2050 sge_len = wr->sg_list[i].length;
2051
2052 memcpy(in_data, sge_addr, sge_len);
2053 in_data += wr->sg_list[i].length;
2054 wqe->inline_len += wr->sg_list[i].length;
2055 }
2056 return wqe->inline_len;
2057}
2058
2059static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2060 struct ib_send_wr *wr,
2061 struct bnxt_qplib_swqe *wqe)
2062{
2063 int payload_sz = 0;
2064
2065 if (wr->send_flags & IB_SEND_INLINE)
2066 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2067 else
2068 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2069 wqe->num_sge);
2070
2071 return payload_sz;
2072}
2073
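/*
 * HW stall workaround for UD-type QPs (UD, GSI and raw ethertype):
 * after every BNXT_RE_UD_QP_HW_STALL WQEs posted, modify the QP
 * state to RTS again and restart the WQE count.
 */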
2074static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2075{
2076 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2077 qp->ib_qp.qp_type == IB_QPT_GSI ||
2078 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2079 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2080 int qp_attr_mask;
2081 struct ib_qp_attr qp_attr;
2082
2083 qp_attr_mask = IB_QP_STATE;
2084 qp_attr.qp_state = IB_QPS_RTS;
2085 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2086 qp->qplib_qp.wqe_cnt = 0;
2087 }
2088}
2089
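/*
 * Post a send WR on the driver-owned shadow QP. This path is used by
 * bnxt_re_process_raw_qp_pkt_rx() to relay QP1 traffic, so no bad_wr
 * is reported back to a consumer.
 */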
2090static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2091 struct bnxt_re_qp *qp,
2092 struct ib_send_wr *wr)
2093{
2094 struct bnxt_qplib_swqe wqe;
2095 int rc = 0, payload_sz = 0;
2096 unsigned long flags;
2097
2098 spin_lock_irqsave(&qp->sq_lock, flags);
2099 memset(&wqe, 0, sizeof(wqe));
2100 while (wr) {
2101 /* House keeping */
2102 memset(&wqe, 0, sizeof(wqe));
2103
2104 /* Common */
2105 wqe.num_sge = wr->num_sge;
2106 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2107 dev_err(rdev_to_dev(rdev),
2108 "Limit exceeded for Send SGEs");
2109 rc = -EINVAL;
2110 goto bad;
2111 }
2112
2113 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2114 if (payload_sz < 0) {
2115 rc = -EINVAL;
2116 goto bad;
2117 }
2118 wqe.wr_id = wr->wr_id;
2119
2120 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2121
2122 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2123 if (!rc)
2124 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2125bad:
2126 if (rc) {
2127 dev_err(rdev_to_dev(rdev),
2128 "Post send failed opcode = %#x rc = %d",
2129 wr->opcode, rc);
2130 break;
2131 }
2132 wr = wr->next;
2133 }
2134 bnxt_qplib_post_send_db(&qp->qplib_qp);
2135 bnxt_ud_qp_hw_stall_workaround(qp);
2136 spin_unlock_irqrestore(&qp->sq_lock, flags);
2137 return rc;
2138}
2139
2140int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2141 struct ib_send_wr **bad_wr)
2142{
2143 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2144 struct bnxt_qplib_swqe wqe;
2145 int rc = 0, payload_sz = 0;
2146 unsigned long flags;
2147
2148 spin_lock_irqsave(&qp->sq_lock, flags);
2149 while (wr) {
2150 /* House keeping */
2151 memset(&wqe, 0, sizeof(wqe));
2152
2153 /* Common */
2154 wqe.num_sge = wr->num_sge;
2155 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2156 dev_err(rdev_to_dev(qp->rdev),
2157 "Limit exceeded for Send SGEs");
2158 rc = -EINVAL;
2159 goto bad;
2160 }
2161
2162 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2163 if (payload_sz < 0) {
2164 rc = -EINVAL;
2165 goto bad;
2166 }
2167 wqe.wr_id = wr->wr_id;
2168
2169 switch (wr->opcode) {
2170 case IB_WR_SEND:
2171 case IB_WR_SEND_WITH_IMM:
2172 if (ib_qp->qp_type == IB_QPT_GSI) {
2173 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2174 payload_sz);
2175 if (rc)
2176 goto bad;
2177 wqe.rawqp1.lflags |=
2178 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2179 }
2180 switch (wr->send_flags) {
2181 case IB_SEND_IP_CSUM:
2182 wqe.rawqp1.lflags |=
2183 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2184 break;
2185 default:
2186 break;
2187 }
2188 /* Fall thru to build the wqe */
2189 case IB_WR_SEND_WITH_INV:
2190 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2191 break;
2192 case IB_WR_RDMA_WRITE:
2193 case IB_WR_RDMA_WRITE_WITH_IMM:
2194 case IB_WR_RDMA_READ:
2195 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2196 break;
2197 case IB_WR_ATOMIC_CMP_AND_SWP:
2198 case IB_WR_ATOMIC_FETCH_AND_ADD:
2199 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2200 break;
2201 case IB_WR_RDMA_READ_WITH_INV:
2202 dev_err(rdev_to_dev(qp->rdev),
2203 "RDMA Read with Invalidate is not supported");
2204 rc = -EINVAL;
2205 goto bad;
2206 case IB_WR_LOCAL_INV:
2207 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2208 break;
2209 case IB_WR_REG_MR:
2210 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2211 break;
2212 default:
2213 /* Unsupported WRs */
2214 dev_err(rdev_to_dev(qp->rdev),
2215 "WR (%#x) is not supported", wr->opcode);
2216 rc = -EINVAL;
2217 goto bad;
2218 }
2219 if (!rc)
2220 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2221bad:
2222 if (rc) {
2223 dev_err(rdev_to_dev(qp->rdev),
2224 "post_send failed op:%#x qps = %#x rc = %d\n",
2225 wr->opcode, qp->qplib_qp.state, rc);
2226 *bad_wr = wr;
2227 break;
2228 }
2229 wr = wr->next;
2230 }
2231 bnxt_qplib_post_send_db(&qp->qplib_qp);
2232 bnxt_ud_qp_hw_stall_workaround(qp);
2233 spin_unlock_irqrestore(&qp->sq_lock, flags);
2234
2235 return rc;
2236}
2237
2238static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2239 struct bnxt_re_qp *qp,
2240 struct ib_recv_wr *wr)
2241{
2242 struct bnxt_qplib_swqe wqe;
2243 int rc = 0, payload_sz = 0;
2244
2245 memset(&wqe, 0, sizeof(wqe));
2246 while (wr) {
2247 /* House keeping */
2248 memset(&wqe, 0, sizeof(wqe));
2249
2250 /* Common */
2251 wqe.num_sge = wr->num_sge;
2252 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2253 dev_err(rdev_to_dev(rdev),
2254 "Limit exceeded for Receive SGEs");
2255 rc = -EINVAL;
2256 break;
2257 }
2258 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2259 wr->num_sge);
2260 wqe.wr_id = wr->wr_id;
2261 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2262
2263 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2264 if (rc)
2265 break;
2266
2267 wr = wr->next;
2268 }
2269 if (!rc)
2270 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2271 return rc;
2272}
2273
2274int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2275 struct ib_recv_wr **bad_wr)
2276{
2277 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2278 struct bnxt_qplib_swqe wqe;
2279 int rc = 0, payload_sz = 0;
2280 unsigned long flags;
2281 u32 count = 0;
2282
2283 spin_lock_irqsave(&qp->rq_lock, flags);
2284 while (wr) {
2285 /* House keeping */
2286 memset(&wqe, 0, sizeof(wqe));
2287
2288 /* Common */
2289 wqe.num_sge = wr->num_sge;
2290 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2291 dev_err(rdev_to_dev(qp->rdev),
2292 "Limit exceeded for Receive SGEs");
2293 rc = -EINVAL;
2294 *bad_wr = wr;
2295 break;
2296 }
2297
2298 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2299 wr->num_sge);
2300 wqe.wr_id = wr->wr_id;
2301 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2302
2303 if (ib_qp->qp_type == IB_QPT_GSI)
2304 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2305 payload_sz);
2306 if (!rc)
2307 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2308 if (rc) {
2309 *bad_wr = wr;
2310 break;
2311 }
2312
2313 /* Ring DB if the RQEs posted reaches a threshold value */
2314 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2315 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2316 count = 0;
2317 }
2318
2319 wr = wr->next;
2320 }
2321
2322 if (count)
2323 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2324
2325 spin_unlock_irqrestore(&qp->rq_lock, flags);
2326
2327 return rc;
2328}
2329
2330/* Completion Queues */
2331int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2332{
2333 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2334 struct bnxt_re_dev *rdev = cq->rdev;
2335 int rc;
2336
2337 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2338 if (rc) {
2339 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2340 return rc;
2341 }
2342 if (!IS_ERR_OR_NULL(cq->umem))
2343 ib_umem_release(cq->umem);
2344
2345 if (cq) {
2346 kfree(cq->cql);
2347 kfree(cq);
2348 }
2349 atomic_dec(&rdev->cq_count);
2350 rdev->nq.budget--;
2351 return 0;
2352}
2353
2354struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2355 const struct ib_cq_init_attr *attr,
2356 struct ib_ucontext *context,
2357 struct ib_udata *udata)
2358{
2359 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2360 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2361 struct bnxt_re_cq *cq = NULL;
2362 int rc, entries;
2363 int cqe = attr->cqe;
2364
2365 /* Validate CQ fields */
2366 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2367 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2368 return ERR_PTR(-EINVAL);
2369 }
2370 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2371 if (!cq)
2372 return ERR_PTR(-ENOMEM);
2373
2374 cq->rdev = rdev;
2375 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2376
2377 entries = roundup_pow_of_two(cqe + 1);
2378 if (entries > dev_attr->max_cq_wqes + 1)
2379 entries = dev_attr->max_cq_wqes + 1;
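/*
 * e.g. a request for 100 CQEs becomes a 128-entry queue (rounded up
 * to a power of two), capped at max_cq_wqes + 1.
 */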
2380
2381 if (context) {
2382 struct bnxt_re_cq_req req;
2383 struct bnxt_re_ucontext *uctx = container_of
2384 (context,
2385 struct bnxt_re_ucontext,
2386 ib_uctx);
2387 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2388 rc = -EFAULT;
2389 goto fail;
2390 }
2391
2392 cq->umem = ib_umem_get(context, req.cq_va,
2393 entries * sizeof(struct cq_base),
2394 IB_ACCESS_LOCAL_WRITE, 1);
2395 if (IS_ERR(cq->umem)) {
2396 rc = PTR_ERR(cq->umem);
2397 goto fail;
2398 }
2399 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2400 cq->qplib_cq.nmap = cq->umem->nmap;
2401 cq->qplib_cq.dpi = &uctx->dpi;
2402 } else {
2403 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2404 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2405 GFP_KERNEL);
2406 if (!cq->cql) {
2407 rc = -ENOMEM;
2408 goto fail;
2409 }
2410
2411 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2412 cq->qplib_cq.sghead = NULL;
2413 cq->qplib_cq.nmap = 0;
2414 }
2415 cq->qplib_cq.max_wqe = entries;
2416 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2417
2418 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2419 if (rc) {
2420 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2421 goto fail;
2422 }
2423
2424 cq->ib_cq.cqe = entries;
2425 cq->cq_period = cq->qplib_cq.period;
2426 rdev->nq.budget++;
2427
2428 atomic_inc(&rdev->cq_count);
2429
2430 if (context) {
2431 struct bnxt_re_cq_resp resp;
2432
2433 resp.cqid = cq->qplib_cq.id;
2434 resp.tail = cq->qplib_cq.hwq.cons;
2435 resp.phase = cq->qplib_cq.period;
2436 resp.rsvd = 0;
2437 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2438 if (rc) {
2439 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2440 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2441 goto c2fail;
2442 }
2443 }
2444
2445 return &cq->ib_cq;
2446
2447c2fail:
2448 if (context)
2449 ib_umem_release(cq->umem);
2450fail:
2451 kfree(cq->cql);
2452 kfree(cq);
2453 return ERR_PTR(rc);
2454}
2455
2456static u8 __req_to_ib_wc_status(u8 qstatus)
2457{
2458 switch (qstatus) {
2459 case CQ_REQ_STATUS_OK:
2460 return IB_WC_SUCCESS;
2461 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2462 return IB_WC_BAD_RESP_ERR;
2463 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2464 return IB_WC_LOC_LEN_ERR;
2465 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2466 return IB_WC_LOC_QP_OP_ERR;
2467 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2468 return IB_WC_LOC_PROT_ERR;
2469 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2470 return IB_WC_GENERAL_ERR;
2471 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2472 return IB_WC_REM_INV_REQ_ERR;
2473 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2474 return IB_WC_REM_ACCESS_ERR;
2475 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2476 return IB_WC_REM_OP_ERR;
2477 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2478 return IB_WC_RNR_RETRY_EXC_ERR;
2479 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2480 return IB_WC_RETRY_EXC_ERR;
2481 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2482 return IB_WC_WR_FLUSH_ERR;
2483 default:
2484 return IB_WC_GENERAL_ERR;
2485 }
2486 return 0;
2487}
2488
2489static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2490{
2491 switch (qstatus) {
2492 case CQ_RES_RAWETH_QP1_STATUS_OK:
2493 return IB_WC_SUCCESS;
2494 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2495 return IB_WC_LOC_ACCESS_ERR;
2496 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2497 return IB_WC_LOC_LEN_ERR;
2498 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2499 return IB_WC_LOC_PROT_ERR;
2500 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2501 return IB_WC_LOC_QP_OP_ERR;
2502 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2503 return IB_WC_GENERAL_ERR;
2504 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2505 return IB_WC_WR_FLUSH_ERR;
2506 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2507 return IB_WC_WR_FLUSH_ERR;
2508 default:
2509 return IB_WC_GENERAL_ERR;
2510 }
2511}
2512
2513static u8 __rc_to_ib_wc_status(u8 qstatus)
2514{
2515 switch (qstatus) {
2516 case CQ_RES_RC_STATUS_OK:
2517 return IB_WC_SUCCESS;
2518 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2519 return IB_WC_LOC_ACCESS_ERR;
2520 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2521 return IB_WC_LOC_LEN_ERR;
2522 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2523 return IB_WC_LOC_PROT_ERR;
2524 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2525 return IB_WC_LOC_QP_OP_ERR;
2526 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2527 return IB_WC_GENERAL_ERR;
2528 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2529 return IB_WC_REM_INV_REQ_ERR;
2530 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2531 return IB_WC_WR_FLUSH_ERR;
2532 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2533 return IB_WC_WR_FLUSH_ERR;
2534 default:
2535 return IB_WC_GENERAL_ERR;
2536 }
2537}
2538
2539static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2540{
2541 switch (cqe->type) {
2542 case BNXT_QPLIB_SWQE_TYPE_SEND:
2543 wc->opcode = IB_WC_SEND;
2544 break;
2545 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2546 wc->opcode = IB_WC_SEND;
2547 wc->wc_flags |= IB_WC_WITH_IMM;
2548 break;
2549 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2550 wc->opcode = IB_WC_SEND;
2551 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2552 break;
2553 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2554 wc->opcode = IB_WC_RDMA_WRITE;
2555 break;
2556 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2557 wc->opcode = IB_WC_RDMA_WRITE;
2558 wc->wc_flags |= IB_WC_WITH_IMM;
2559 break;
2560 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2561 wc->opcode = IB_WC_RDMA_READ;
2562 break;
2563 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2564 wc->opcode = IB_WC_COMP_SWAP;
2565 break;
2566 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2567 wc->opcode = IB_WC_FETCH_ADD;
2568 break;
2569 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2570 wc->opcode = IB_WC_LOCAL_INV;
2571 break;
2572 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2573 wc->opcode = IB_WC_REG_MR;
2574 break;
2575 default:
2576 wc->opcode = IB_WC_SEND;
2577 break;
2578 }
2579
2580 wc->status = __req_to_ib_wc_status(cqe->status);
2581}
2582
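/*
 * Classify a raw ethertype QP1 completion: returns
 * BNXT_RE_ROCE_V1_PACKET, BNXT_RE_ROCEV2_IPV4_PACKET or
 * BNXT_RE_ROCEV2_IPV6_PACKET, or -1 if the frame is not RoCE.
 */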
2583static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2584 u16 raweth_qp1_flags2)
2585{
2586 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2587
2588 /* raweth_qp1_flags bits 9-6 indicate itype */
2589 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2590 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2591 return -1;
2592
2593 if (raweth_qp1_flags2 &
2594 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2595 raweth_qp1_flags2 &
2596 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2597 is_udp = true;
2598 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2599 (raweth_qp1_flags2 &
2600 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2601 (is_ipv6 = true) : (is_ipv4 = true);
2602 return ((is_ipv6) ?
2603 BNXT_RE_ROCEV2_IPV6_PACKET :
2604 BNXT_RE_ROCEV2_IPV4_PACKET);
2605 } else {
2606 return BNXT_RE_ROCE_V1_PACKET;
2607 }
2608}
2609
2610static int bnxt_re_to_ib_nw_type(int nw_type)
2611{
2612 u8 nw_hdr_type = 0xFF;
2613
2614 switch (nw_type) {
2615 case BNXT_RE_ROCE_V1_PACKET:
2616 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2617 break;
2618 case BNXT_RE_ROCEV2_IPV4_PACKET:
2619 nw_hdr_type = RDMA_NETWORK_IPV4;
2620 break;
2621 case BNXT_RE_ROCEV2_IPV6_PACKET:
2622 nw_hdr_type = RDMA_NETWORK_IPV6;
2623 break;
2624 }
2625 return nw_hdr_type;
2626}
2627
2628static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2629 void *rq_hdr_buf)
2630{
2631 u8 *tmp_buf = NULL;
2632 struct ethhdr *eth_hdr;
2633 u16 eth_type;
2634 bool rc = false;
2635
2636 tmp_buf = (u8 *)rq_hdr_buf;
2637 /*
2638 * If the destination MAC does not match the interface MAC, this
2639 * could be a loopback or a multicast address; check whether it
2640 * is a loopback packet.
2641 */
2642 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2643 tmp_buf += 4;
2644 /* Check the ether type */
2645 eth_hdr = (struct ethhdr *)tmp_buf;
2646 eth_type = ntohs(eth_hdr->h_proto);
2647 switch (eth_type) {
2648 case ETH_P_IBOE:
2649 rc = true;
2650 break;
2651 case ETH_P_IP:
2652 case ETH_P_IPV6: {
2653 u32 len;
2654 struct udphdr *udp_hdr;
2655
2656 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2657 sizeof(struct ipv6hdr));
2658 tmp_buf += sizeof(struct ethhdr) + len;
2659 udp_hdr = (struct udphdr *)tmp_buf;
2660 if (ntohs(udp_hdr->dest) ==
2661 ROCE_V2_UDP_DPORT)
2662 rc = true;
2663 break;
2664 }
2665 default:
2666 break;
2667 }
2668 }
2669
2670 return rc;
2671}
2672
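/*
 * Relay a QP1 packet received on the raw ethertype GSI QP to the
 * shadow QP: the send SGEs skip the Ethernet and UDP/BTH/DETH
 * headers and carry the GRH (or IP header) plus the 256-byte MAD,
 * a receive buffer covering the saved ULP SGE is posted on the
 * shadow QP, and the data is looped back with a UD send so the MAD
 * layer sees a conventional GRH + MAD completion.
 */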
2673static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2674 struct bnxt_qplib_cqe *cqe)
2675{
2676 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2677 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2678 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2679 struct ib_send_wr *swr;
2680 struct ib_ud_wr udwr;
2681 struct ib_recv_wr rwr;
2682 int pkt_type = 0;
2683 u32 tbl_idx;
2684 void *rq_hdr_buf;
2685 dma_addr_t rq_hdr_buf_map;
2686 dma_addr_t shrq_hdr_buf_map;
2687 u32 offset = 0;
2688 u32 skip_bytes = 0;
2689 struct ib_sge s_sge[2];
2690 struct ib_sge r_sge[2];
2691 int rc;
2692
2693 memset(&udwr, 0, sizeof(udwr));
2694 memset(&rwr, 0, sizeof(rwr));
2695 memset(&s_sge, 0, sizeof(s_sge));
2696 memset(&r_sge, 0, sizeof(r_sge));
2697
2698 swr = &udwr.wr;
2699 tbl_idx = cqe->wr_id;
2700
2701 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2702 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2703 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2704 tbl_idx);
2705
2706 /* Shadow QP header buffer */
2707 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2708 tbl_idx);
2709 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2710
2711 /* Store this cqe */
2712 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2713 sqp_entry->qp1_qp = qp1_qp;
2714
2715 /* Find packet type from the cqe */
2716
2717 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2718 cqe->raweth_qp1_flags2);
2719 if (pkt_type < 0) {
2720 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2721 return -EINVAL;
2722 }
2723
2724 /* Adjust the offset for the user buffer and post in the rq */
2725
2726 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2727 offset = 20;
2728
2729 /*
2730 * QP1 loopback packet has 4 bytes of internal header before
2731 * ether header. Skip these four bytes.
2732 */
2733 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2734 skip_bytes = 4;
2735
2736 /* First send SGE. Skip the ether header */
2737 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2738 + skip_bytes;
2739 s_sge[0].lkey = 0xFFFFFFFF;
2740 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2741 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2742
2743 /* Second Send SGE */
2744 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2745 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2746 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2747 s_sge[1].addr += 8;
2748 s_sge[1].lkey = 0xFFFFFFFF;
2749 s_sge[1].length = 256;
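/* 256 bytes covers a complete MAD datagram */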
2750
2751 /* First recv SGE */
2752
2753 r_sge[0].addr = shrq_hdr_buf_map;
2754 r_sge[0].lkey = 0xFFFFFFFF;
2755 r_sge[0].length = 40;
2756
2757 r_sge[1].addr = sqp_entry->sge.addr + offset;
2758 r_sge[1].lkey = sqp_entry->sge.lkey;
2759 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2760
2761 /* Create receive work request */
2762 rwr.num_sge = 2;
2763 rwr.sg_list = r_sge;
2764 rwr.wr_id = tbl_idx;
2765 rwr.next = NULL;
2766
2767 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2768 if (rc) {
2769 dev_err(rdev_to_dev(rdev),
2770 "Failed to post Rx buffers to shadow QP");
2771 return -ENOMEM;
2772 }
2773
2774 swr->num_sge = 2;
2775 swr->sg_list = s_sge;
2776 swr->wr_id = tbl_idx;
2777 swr->opcode = IB_WR_SEND;
2778 swr->next = NULL;
2779
2780 udwr.ah = &rdev->sqp_ah->ib_ah;
2781 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2782 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2783
2784 /* post data received in the send queue */
2785 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2786
2787 return 0;
2788}
2789
2790static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2791 struct bnxt_qplib_cqe *cqe)
2792{
2793 wc->opcode = IB_WC_RECV;
2794 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2795 wc->wc_flags |= IB_WC_GRH;
2796}
2797
2798static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2799 struct bnxt_qplib_cqe *cqe)
2800{
2801 wc->opcode = IB_WC_RECV;
2802 wc->status = __rc_to_ib_wc_status(cqe->status);
2803
2804 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2805 wc->wc_flags |= IB_WC_WITH_IMM;
2806 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2807 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2808 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2809 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2810 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2811}
2812
2813static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2814 struct ib_wc *wc,
2815 struct bnxt_qplib_cqe *cqe)
2816{
2817 u32 tbl_idx;
2818 struct bnxt_re_dev *rdev = qp->rdev;
2819 struct bnxt_re_qp *qp1_qp = NULL;
2820 struct bnxt_qplib_cqe *orig_cqe = NULL;
2821 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2822 int nw_type;
2823
2824 tbl_idx = cqe->wr_id;
2825
2826 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2827 qp1_qp = sqp_entry->qp1_qp;
2828 orig_cqe = &sqp_entry->cqe;
2829
2830 wc->wr_id = sqp_entry->wrid;
2831 wc->byte_len = orig_cqe->length;
2832 wc->qp = &qp1_qp->ib_qp;
2833
2834 wc->ex.imm_data = orig_cqe->immdata;
2835 wc->src_qp = orig_cqe->src_qp;
2836 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2837 wc->port_num = 1;
2838 wc->vendor_err = orig_cqe->status;
2839
2840 wc->opcode = IB_WC_RECV;
2841 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2842 wc->wc_flags |= IB_WC_GRH;
2843
2844 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2845 orig_cqe->raweth_qp1_flags2);
2846 if (nw_type >= 0) {
2847 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2848 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2849 }
2850}
2851
2852static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2853 struct bnxt_qplib_cqe *cqe)
2854{
2855 wc->opcode = IB_WC_RECV;
2856 wc->status = __rc_to_ib_wc_status(cqe->status);
2857
2858 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2859 wc->wc_flags |= IB_WC_WITH_IMM;
2860 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2861 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2862 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2863 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2864 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2865}
2866
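/*
 * Post a "phantom" WQE: a fence memory-window bind that carries no
 * payload, counted in sq.phantom_wqe_cnt. The CQ poll path sets
 * sq->send_phantom when the qplib layer wants this extra WQE and
 * bnxt_re_poll_cq() retries if it cannot be posted (-ENOMEM).
 */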
2867static int send_phantom_wqe(struct bnxt_re_qp *qp)
2868{
2869 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2870 unsigned long flags;
2871 int rc = 0;
2872
2873 spin_lock_irqsave(&qp->sq_lock, flags);
2874
2875 rc = bnxt_re_bind_fence_mw(lib_qp);
2876 if (!rc) {
2877 lib_qp->sq.phantom_wqe_cnt++;
2878 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2879 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2880 lib_qp->id, lib_qp->sq.hwq.prod,
2881 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2882 lib_qp->sq.phantom_wqe_cnt);
2883 }
2884
2885 spin_unlock_irqrestore(&qp->sq_lock, flags);
2886 return rc;
2887}
2888
2889int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2890{
2891 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2892 struct bnxt_re_qp *qp;
2893 struct bnxt_qplib_cqe *cqe;
2894 int i, ncqe, budget;
2895 struct bnxt_qplib_q *sq;
2896 struct bnxt_qplib_qp *lib_qp;
2897 u32 tbl_idx;
2898 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2899 unsigned long flags;
2900
2901 spin_lock_irqsave(&cq->cq_lock, flags);
2902 budget = min_t(u32, num_entries, cq->max_cql);
2903 num_entries = budget;
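/*
 * The per-call budget is bounded by cq->max_cql: cq->cql is the
 * scratch array the qplib layer fills before the CQEs are
 * transcribed into ib_wc entries below.
 */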
2904 if (!cq->cql) {
2905 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2906 goto exit;
2907 }
2908 cqe = &cq->cql[0];
2909 while (budget) {
2910 lib_qp = NULL;
2911 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2912 if (lib_qp) {
2913 sq = &lib_qp->sq;
2914 if (sq->send_phantom) {
2915 qp = container_of(lib_qp,
2916 struct bnxt_re_qp, qplib_qp);
2917 if (send_phantom_wqe(qp) == -ENOMEM)
2918 dev_err(rdev_to_dev(cq->rdev),
2919 "Phantom failed! Scheduled to send again\n");
2920 else
2921 sq->send_phantom = false;
2922 }
2923 }
2924
2925 if (!ncqe)
2926 break;
2927
2928 for (i = 0; i < ncqe; i++, cqe++) {
2929 /* Transcribe each qplib_wqe back to ib_wc */
2930 memset(wc, 0, sizeof(*wc));
2931
2932 wc->wr_id = cqe->wr_id;
2933 wc->byte_len = cqe->length;
2934 qp = container_of
2935 ((struct bnxt_qplib_qp *)
2936 (unsigned long)(cqe->qp_handle),
2937 struct bnxt_re_qp, qplib_qp);
2938 if (!qp) {
2939 dev_err(rdev_to_dev(cq->rdev),
2940 "POLL CQ : bad QP handle");
2941 continue;
2942 }
2943 wc->qp = &qp->ib_qp;
2944 wc->ex.imm_data = cqe->immdata;
2945 wc->src_qp = cqe->src_qp;
2946 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2947 wc->port_num = 1;
2948 wc->vendor_err = cqe->status;
2949
2950 switch (cqe->opcode) {
2951 case CQ_BASE_CQE_TYPE_REQ:
2952 if (qp->qplib_qp.id ==
2953 qp->rdev->qp1_sqp->qplib_qp.id) {
2954 /* Handle this completion with
2955 * the stored completion
2956 */
2957 memset(wc, 0, sizeof(*wc));
2958 continue;
2959 }
2960 bnxt_re_process_req_wc(wc, cqe);
2961 break;
2962 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2963 if (!cqe->status) {
2964 int rc = 0;
2965
2966 rc = bnxt_re_process_raw_qp_pkt_rx
2967 (qp, cqe);
2968 if (!rc) {
2969 memset(wc, 0, sizeof(*wc));
2970 continue;
2971 }
2972 cqe->status = -1;
2973 }
2974 /* Errors need not be looped back.
2975 * But change the wr_id to the one
2976 * stored in the table
2977 */
2978 tbl_idx = cqe->wr_id;
2979 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2980 wc->wr_id = sqp_entry->wrid;
2981 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2982 break;
2983 case CQ_BASE_CQE_TYPE_RES_RC:
2984 bnxt_re_process_res_rc_wc(wc, cqe);
2985 break;
2986 case CQ_BASE_CQE_TYPE_RES_UD:
2987 if (qp->qplib_qp.id ==
2988 qp->rdev->qp1_sqp->qplib_qp.id) {
2989 /* Handle this completion with
2990 * the stored completion
2991 */
2992 if (cqe->status) {
2993 continue;
2994 } else {
2995 bnxt_re_process_res_shadow_qp_wc
2996 (qp, wc, cqe);
2997 break;
2998 }
2999 }
3000 bnxt_re_process_res_ud_wc(wc, cqe);
3001 break;
3002 default:
3003 dev_err(rdev_to_dev(cq->rdev),
3004 "POLL CQ : type 0x%x not handled",
3005 cqe->opcode);
3006 continue;
3007 }
3008 wc++;
3009 budget--;
3010 }
3011 }
3012exit:
3013 spin_unlock_irqrestore(&cq->cq_lock, flags);
3014 return num_entries - budget;
3015}
3016
3017int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3018 enum ib_cq_notify_flags ib_cqn_flags)
3019{
3020 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3021 int type = 0;
3022
3023 /* Trigger on the very next completion */
3024 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3025 type = DBR_DBR_TYPE_CQ_ARMALL;
3026 /* Trigger on the next solicited completion */
3027 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3028 type = DBR_DBR_TYPE_CQ_ARMSE;
3029
3030 /* Poll to see if there are missed events */
3031 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3032 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3033 return 1;
3034
3035 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3036
3037 return 0;
3038}
3039
3040/* Memory Regions */
3041struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3042{
3043 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3044 struct bnxt_re_dev *rdev = pd->rdev;
3045 struct bnxt_re_mr *mr;
3046 u64 pbl = 0;
3047 int rc;
3048
3049 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3050 if (!mr)
3051 return ERR_PTR(-ENOMEM);
3052
3053 mr->rdev = rdev;
3054 mr->qplib_mr.pd = &pd->qplib_pd;
3055 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3056 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3057
3058 /* Allocate and register 0 as the address */
3059 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3060 if (rc)
3061 goto fail;
3062
3063 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3064 mr->qplib_mr.total_size = -1; /* Infinite length */
3065 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3066 if (rc)
3067 goto fail_mr;
3068
3069 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3070 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3071 IB_ACCESS_REMOTE_ATOMIC))
3072 mr->ib_mr.rkey = mr->ib_mr.lkey;
3073 atomic_inc(&rdev->mr_count);
3074
3075 return &mr->ib_mr;
3076
3077fail_mr:
3078 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3079fail:
3080 kfree(mr);
3081 return ERR_PTR(rc);
3082}
3083
3084int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3085{
3086 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3087 struct bnxt_re_dev *rdev = mr->rdev;
3088 int rc;
3089
3090 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3091 if (rc) {
3092 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3093 return rc;
3094 }
3095
3096 if (mr->npages && mr->pages) {
3097 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3098 &mr->qplib_frpl);
3099 kfree(mr->pages);
3100 mr->npages = 0;
3101 mr->pages = NULL;
3102 }
3103 if (!IS_ERR_OR_NULL(mr->ib_umem))
3104 ib_umem_release(mr->ib_umem);
3105
3106 kfree(mr);
3107 atomic_dec(&rdev->mr_count);
3108 return rc;
3109}
3110
3111static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3112{
3113 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3114
3115 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3116 return -ENOMEM;
3117
3118 mr->pages[mr->npages++] = addr;
3119 return 0;
3120}
3121
3122int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3123 unsigned int *sg_offset)
3124{
3125 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3126
3127 mr->npages = 0;
3128 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3129}
3130
3131struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3132 u32 max_num_sg)
3133{
3134 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3135 struct bnxt_re_dev *rdev = pd->rdev;
3136 struct bnxt_re_mr *mr = NULL;
3137 int rc;
3138
3139 if (type != IB_MR_TYPE_MEM_REG) {
3140 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3141 return ERR_PTR(-EINVAL);
3142 }
3143 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3144 return ERR_PTR(-EINVAL);
3145
3146 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3147 if (!mr)
3148 return ERR_PTR(-ENOMEM);
3149
3150 mr->rdev = rdev;
3151 mr->qplib_mr.pd = &pd->qplib_pd;
3152 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3153 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3154
3155 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3156 if (rc)
3157 goto fail;
3158
3159 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3160 mr->ib_mr.rkey = mr->ib_mr.lkey;
3161
3162 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3163 if (!mr->pages) {
3164 rc = -ENOMEM;
3165 goto fail;
3166 }
3167 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3168 &mr->qplib_frpl, max_num_sg);
3169 if (rc) {
3170 dev_err(rdev_to_dev(rdev),
3171 "Failed to allocate HW FR page list");
3172 goto fail_mr;
3173 }
3174
3175 atomic_inc(&rdev->mr_count);
3176 return &mr->ib_mr;
3177
3178fail_mr:
3179 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3180fail:
3181 kfree(mr->pages);
3182 kfree(mr);
3183 return ERR_PTR(rc);
3184}
3185
3186struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3187 struct ib_udata *udata)
3188{
3189 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3190 struct bnxt_re_dev *rdev = pd->rdev;
3191 struct bnxt_re_mw *mw;
3192 int rc;
3193
3194 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3195 if (!mw)
3196 return ERR_PTR(-ENOMEM);
3197 mw->rdev = rdev;
3198 mw->qplib_mw.pd = &pd->qplib_pd;
3199
3200 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3201 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3202 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3203 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3204 if (rc) {
3205 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3206 goto fail;
3207 }
3208 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3209
3210 atomic_inc(&rdev->mw_count);
3211 return &mw->ib_mw;
3212
3213fail:
3214 kfree(mw);
3215 return ERR_PTR(rc);
3216}
3217
3218int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3219{
3220 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3221 struct bnxt_re_dev *rdev = mw->rdev;
3222 int rc;
3223
3224 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3225 if (rc) {
3226 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3227 return rc;
3228 }
3229
3230 kfree(mw);
3231 atomic_dec(&rdev->mw_count);
3232 return rc;
3233}
3234
3235/* uverbs */
3236struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3237 u64 virt_addr, int mr_access_flags,
3238 struct ib_udata *udata)
3239{
3240 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3241 struct bnxt_re_dev *rdev = pd->rdev;
3242 struct bnxt_re_mr *mr;
3243 struct ib_umem *umem;
3244 u64 *pbl_tbl, *pbl_tbl_orig;
3245 int i, umem_pgs, pages, rc;
3246 struct scatterlist *sg;
3247 int entry;
3248
3249 if (length > BNXT_RE_MAX_MR_SIZE) {
3250 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3251 length, BNXT_RE_MAX_MR_SIZE);
3252 return ERR_PTR(-ENOMEM);
3253 }
3254
3255 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3256 if (!mr)
3257 return ERR_PTR(-ENOMEM);
3258
3259 mr->rdev = rdev;
3260 mr->qplib_mr.pd = &pd->qplib_pd;
3261 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3262 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3263
3264 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3265 mr_access_flags, 0);
3266 if (IS_ERR(umem)) {
3267 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3268 rc = -EFAULT;
3269 goto free_mr;
3270 }
3271 mr->ib_umem = umem;
3272
3273 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3274 if (rc) {
3275 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3276 goto release_umem;
3277 }
3278 /* The fixed portion of the rkey is the same as the lkey */
3279 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3280
3281 mr->qplib_mr.va = virt_addr;
3282 umem_pgs = ib_umem_page_count(umem);
3283 if (!umem_pgs) {
3284 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3285 rc = -EINVAL;
3286 goto free_mrw;
3287 }
3288 mr->qplib_mr.total_size = length;
3289
3290 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3291 if (!pbl_tbl) {
3292 rc = -EINVAL;
3293 goto free_mrw;
3294 }
3295 pbl_tbl_orig = pbl_tbl;
3296
3297 if (umem->hugetlb) {
3298 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3299 rc = -EFAULT;
3300 goto fail;
3301 }
3302
3303 if (umem->page_shift != PAGE_SHIFT) {
3304 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
3305 rc = -EFAULT;
3306 goto fail;
3307 }
3308 /* Map umem buf ptrs to the PBL */
3309 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3310 pages = sg_dma_len(sg) >> umem->page_shift;
3311 for (i = 0; i < pages; i++, pbl_tbl++)
3312 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
3313 }
3314 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3315 umem_pgs, false);
3316 if (rc) {
3317 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3318 goto fail;
3319 }
3320
3321 kfree(pbl_tbl_orig);
3322
3323 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3324 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3325 atomic_inc(&rdev->mr_count);
3326
3327 return &mr->ib_mr;
3328fail:
3329 kfree(pbl_tbl_orig);
3330free_mrw:
3331 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3332release_umem:
3333 ib_umem_release(umem);
3334free_mr:
3335 kfree(mr);
3336 return ERR_PTR(rc);
3337}
3338
3339struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3340 struct ib_udata *udata)
3341{
3342 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3343 struct bnxt_re_uctx_resp resp;
3344 struct bnxt_re_ucontext *uctx;
3345 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3346 int rc;
3347
3348 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3349 ibdev->uverbs_abi_ver);
3350
3351 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3352 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3353 BNXT_RE_ABI_VERSION);
3354 return ERR_PTR(-EPERM);
3355 }
3356
3357 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3358 if (!uctx)
3359 return ERR_PTR(-ENOMEM);
3360
3361 uctx->rdev = rdev;
3362
3363 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3364 if (!uctx->shpg) {
3365 rc = -ENOMEM;
3366 goto fail;
3367 }
3368 spin_lock_init(&uctx->sh_lock);
3369
3370 resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3371 resp.max_qp = rdev->qplib_ctx.qpc_count;
3372 resp.pg_size = PAGE_SIZE;
3373 resp.cqe_sz = sizeof(struct cq_base);
3374 resp.max_cqd = dev_attr->max_cq_wqes;
3375 resp.rsvd = 0;
3376
3377 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3378 if (rc) {
3379 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3380 rc = -EFAULT;
3381 goto cfail;
3382 }
3383
3384 return &uctx->ib_uctx;
3385cfail:
3386 free_page((unsigned long)uctx->shpg);
3387 uctx->shpg = NULL;
3388fail:
3389 kfree(uctx);
3390 return ERR_PTR(rc);
3391}
3392
3393int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3394{
3395 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3396 struct bnxt_re_ucontext,
3397 ib_uctx);
3398
3399 struct bnxt_re_dev *rdev = uctx->rdev;
3400 int rc = 0;
3401
3402 if (uctx->shpg)
3403 free_page((unsigned long)uctx->shpg);
3404
3405 if (uctx->dpi.dbr) {
3406 /* Free the DPI if one was allocated for this context (it is set up
3407 * when the application allocates its first PD) and mark it NULL.
3408 */
3409 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3410 &rdev->qplib_res.dpi_tbl,
3411 &uctx->dpi);
3412 if (rc)
3413 dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
3414 /* Don't fail, continue */
3415 uctx->dpi.dbr = NULL;
3416 }
3417
3418 kfree(uctx);
3419 return 0;
3420}
3421
3422/* Helper function to mmap the virtual memory from user app */
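/* A non-zero vm_pgoff maps the doorbell (DPI) page with caching
 * disabled; an offset of zero maps the per-context shared page.
 */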
3423int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3424{
3425 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3426 struct bnxt_re_ucontext,
3427 ib_uctx);
3428 struct bnxt_re_dev *rdev = uctx->rdev;
3429 u64 pfn;
3430
3431 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3432 return -EINVAL;
3433
3434 if (vma->vm_pgoff) {
3435 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3436 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3437 PAGE_SIZE, vma->vm_page_prot)) {
3438 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3439 return -EAGAIN;
3440 }
3441 } else {
3442 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3443 if (remap_pfn_range(vma, vma->vm_start,
3444 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3445 dev_err(rdev_to_dev(rdev),
3446 "Failed to map shared page");
3447 return -EAGAIN;
3448 }
3449 }
3450
3451 return 0;
3452}