2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: IB Verbs interpreter
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
55 #include "qplib_res.h"
58 #include "qplib_rcfw.h"
62 #include <rdma/bnxt_re-abi.h>
64 static int __from_ib_access_flags(int iflags
)
68 if (iflags
& IB_ACCESS_LOCAL_WRITE
)
69 qflags
|= BNXT_QPLIB_ACCESS_LOCAL_WRITE
;
70 if (iflags
& IB_ACCESS_REMOTE_READ
)
71 qflags
|= BNXT_QPLIB_ACCESS_REMOTE_READ
;
72 if (iflags
& IB_ACCESS_REMOTE_WRITE
)
73 qflags
|= BNXT_QPLIB_ACCESS_REMOTE_WRITE
;
74 if (iflags
& IB_ACCESS_REMOTE_ATOMIC
)
75 qflags
|= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC
;
76 if (iflags
& IB_ACCESS_MW_BIND
)
77 qflags
|= BNXT_QPLIB_ACCESS_MW_BIND
;
78 if (iflags
& IB_ZERO_BASED
)
79 qflags
|= BNXT_QPLIB_ACCESS_ZERO_BASED
;
80 if (iflags
& IB_ACCESS_ON_DEMAND
)
81 qflags
|= BNXT_QPLIB_ACCESS_ON_DEMAND
;
85 static enum ib_access_flags
__to_ib_access_flags(int qflags
)
87 enum ib_access_flags iflags
= 0;
89 if (qflags
& BNXT_QPLIB_ACCESS_LOCAL_WRITE
)
90 iflags
|= IB_ACCESS_LOCAL_WRITE
;
91 if (qflags
& BNXT_QPLIB_ACCESS_REMOTE_WRITE
)
92 iflags
|= IB_ACCESS_REMOTE_WRITE
;
93 if (qflags
& BNXT_QPLIB_ACCESS_REMOTE_READ
)
94 iflags
|= IB_ACCESS_REMOTE_READ
;
95 if (qflags
& BNXT_QPLIB_ACCESS_REMOTE_ATOMIC
)
96 iflags
|= IB_ACCESS_REMOTE_ATOMIC
;
97 if (qflags
& BNXT_QPLIB_ACCESS_MW_BIND
)
98 iflags
|= IB_ACCESS_MW_BIND
;
99 if (qflags
& BNXT_QPLIB_ACCESS_ZERO_BASED
)
100 iflags
|= IB_ZERO_BASED
;
101 if (qflags
& BNXT_QPLIB_ACCESS_ON_DEMAND
)
102 iflags
|= IB_ACCESS_ON_DEMAND
;
106 static int bnxt_re_build_sgl(struct ib_sge
*ib_sg_list
,
107 struct bnxt_qplib_sge
*sg_list
, int num
)
111 for (i
= 0; i
< num
; i
++) {
112 sg_list
[i
].addr
= ib_sg_list
[i
].addr
;
113 sg_list
[i
].lkey
= ib_sg_list
[i
].lkey
;
114 sg_list
[i
].size
= ib_sg_list
[i
].length
;
115 total
+= sg_list
[i
].size
;
121 struct net_device
*bnxt_re_get_netdev(struct ib_device
*ibdev
, u8 port_num
)
123 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
124 struct net_device
*netdev
= NULL
;
128 netdev
= rdev
->netdev
;
136 int bnxt_re_query_device(struct ib_device
*ibdev
,
137 struct ib_device_attr
*ib_attr
,
138 struct ib_udata
*udata
)
140 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
141 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
143 memset(ib_attr
, 0, sizeof(*ib_attr
));
145 ib_attr
->fw_ver
= (u64
)(unsigned long)(dev_attr
->fw_ver
);
146 bnxt_qplib_get_guid(rdev
->netdev
->dev_addr
,
147 (u8
*)&ib_attr
->sys_image_guid
);
148 ib_attr
->max_mr_size
= ~0ull;
149 ib_attr
->page_size_cap
= BNXT_RE_PAGE_SIZE_4K
| BNXT_RE_PAGE_SIZE_8K
|
150 BNXT_RE_PAGE_SIZE_64K
| BNXT_RE_PAGE_SIZE_2M
|
151 BNXT_RE_PAGE_SIZE_8M
| BNXT_RE_PAGE_SIZE_1G
;
153 ib_attr
->vendor_id
= rdev
->en_dev
->pdev
->vendor
;
154 ib_attr
->vendor_part_id
= rdev
->en_dev
->pdev
->device
;
155 ib_attr
->hw_ver
= rdev
->en_dev
->pdev
->subsystem_device
;
156 ib_attr
->max_qp
= dev_attr
->max_qp
;
157 ib_attr
->max_qp_wr
= dev_attr
->max_qp_wqes
;
158 ib_attr
->device_cap_flags
=
159 IB_DEVICE_CURR_QP_STATE_MOD
160 | IB_DEVICE_RC_RNR_NAK_GEN
161 | IB_DEVICE_SHUTDOWN_PORT
162 | IB_DEVICE_SYS_IMAGE_GUID
163 | IB_DEVICE_LOCAL_DMA_LKEY
164 | IB_DEVICE_RESIZE_MAX_WR
165 | IB_DEVICE_PORT_ACTIVE_EVENT
166 | IB_DEVICE_N_NOTIFY_CQ
167 | IB_DEVICE_MEM_WINDOW
168 | IB_DEVICE_MEM_WINDOW_TYPE_2B
169 | IB_DEVICE_MEM_MGT_EXTENSIONS
;
170 ib_attr
->max_sge
= dev_attr
->max_qp_sges
;
171 ib_attr
->max_sge_rd
= dev_attr
->max_qp_sges
;
172 ib_attr
->max_cq
= dev_attr
->max_cq
;
173 ib_attr
->max_cqe
= dev_attr
->max_cq_wqes
;
174 ib_attr
->max_mr
= dev_attr
->max_mr
;
175 ib_attr
->max_pd
= dev_attr
->max_pd
;
176 ib_attr
->max_qp_rd_atom
= dev_attr
->max_qp_rd_atom
;
177 ib_attr
->max_qp_init_rd_atom
= dev_attr
->max_qp_rd_atom
;
178 ib_attr
->atomic_cap
= IB_ATOMIC_HCA
;
179 ib_attr
->masked_atomic_cap
= IB_ATOMIC_HCA
;
181 ib_attr
->max_ee_rd_atom
= 0;
182 ib_attr
->max_res_rd_atom
= 0;
183 ib_attr
->max_ee_init_rd_atom
= 0;
185 ib_attr
->max_rdd
= 0;
186 ib_attr
->max_mw
= dev_attr
->max_mw
;
187 ib_attr
->max_raw_ipv6_qp
= 0;
188 ib_attr
->max_raw_ethy_qp
= dev_attr
->max_raw_ethy_qp
;
189 ib_attr
->max_mcast_grp
= 0;
190 ib_attr
->max_mcast_qp_attach
= 0;
191 ib_attr
->max_total_mcast_qp_attach
= 0;
192 ib_attr
->max_ah
= dev_attr
->max_ah
;
194 ib_attr
->max_fmr
= 0;
195 ib_attr
->max_map_per_fmr
= 0;
197 ib_attr
->max_srq
= dev_attr
->max_srq
;
198 ib_attr
->max_srq_wr
= dev_attr
->max_srq_wqes
;
199 ib_attr
->max_srq_sge
= dev_attr
->max_srq_sges
;
201 ib_attr
->max_fast_reg_page_list_len
= MAX_PBL_LVL_1_PGS
;
203 ib_attr
->max_pkeys
= 1;
204 ib_attr
->local_ca_ack_delay
= 0;
208 int bnxt_re_modify_device(struct ib_device
*ibdev
,
209 int device_modify_mask
,
210 struct ib_device_modify
*device_modify
)
212 switch (device_modify_mask
) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID
:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
217 case IB_DEVICE_MODIFY_NODE_DESC
:
218 /* Node Desc should be made as READ-ONLY */
226 static void __to_ib_speed_width(struct net_device
*netdev
, u8
*speed
, u8
*width
)
228 struct ethtool_link_ksettings lksettings
;
231 if (netdev
->ethtool_ops
&& netdev
->ethtool_ops
->get_link_ksettings
) {
232 memset(&lksettings
, 0, sizeof(lksettings
));
234 netdev
->ethtool_ops
->get_link_ksettings(netdev
, &lksettings
);
236 espeed
= lksettings
.base
.speed
;
238 espeed
= SPEED_UNKNOWN
;
242 *speed
= IB_SPEED_SDR
;
243 *width
= IB_WIDTH_1X
;
246 *speed
= IB_SPEED_QDR
;
247 *width
= IB_WIDTH_1X
;
250 *speed
= IB_SPEED_DDR
;
251 *width
= IB_WIDTH_4X
;
254 *speed
= IB_SPEED_EDR
;
255 *width
= IB_WIDTH_1X
;
258 *speed
= IB_SPEED_QDR
;
259 *width
= IB_WIDTH_4X
;
264 *speed
= IB_SPEED_SDR
;
265 *width
= IB_WIDTH_1X
;
271 int bnxt_re_query_port(struct ib_device
*ibdev
, u8 port_num
,
272 struct ib_port_attr
*port_attr
)
274 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
275 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
277 memset(port_attr
, 0, sizeof(*port_attr
));
279 if (netif_running(rdev
->netdev
) && netif_carrier_ok(rdev
->netdev
)) {
280 port_attr
->state
= IB_PORT_ACTIVE
;
281 port_attr
->phys_state
= 5;
283 port_attr
->state
= IB_PORT_DOWN
;
284 port_attr
->phys_state
= 3;
286 port_attr
->max_mtu
= IB_MTU_4096
;
287 port_attr
->active_mtu
= iboe_get_mtu(rdev
->netdev
->mtu
);
288 port_attr
->gid_tbl_len
= dev_attr
->max_sgid
;
289 port_attr
->port_cap_flags
= IB_PORT_CM_SUP
| IB_PORT_REINIT_SUP
|
290 IB_PORT_DEVICE_MGMT_SUP
|
291 IB_PORT_VENDOR_CLASS_SUP
|
292 IB_PORT_IP_BASED_GIDS
;
294 /* Max MSG size set to 2G for now */
295 port_attr
->max_msg_sz
= 0x80000000;
296 port_attr
->bad_pkey_cntr
= 0;
297 port_attr
->qkey_viol_cntr
= 0;
298 port_attr
->pkey_tbl_len
= dev_attr
->max_pkey
;
300 port_attr
->sm_lid
= 0;
302 port_attr
->max_vl_num
= 4;
303 port_attr
->sm_sl
= 0;
304 port_attr
->subnet_timeout
= 0;
305 port_attr
->init_type_reply
= 0;
306 /* call the underlying netdev's ethtool hooks to query speed settings
307 * for which we acquire rtnl_lock _only_ if it's registered with
308 * IB stack to avoid race in the NETDEV_UNREG path
310 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED
, &rdev
->flags
))
311 __to_ib_speed_width(rdev
->netdev
, &port_attr
->active_speed
,
312 &port_attr
->active_width
);
316 int bnxt_re_modify_port(struct ib_device
*ibdev
, u8 port_num
,
317 int port_modify_mask
,
318 struct ib_port_modify
*port_modify
)
320 switch (port_modify_mask
) {
321 case IB_PORT_SHUTDOWN
:
323 case IB_PORT_INIT_TYPE
:
325 case IB_PORT_RESET_QKEY_CNTR
:
333 int bnxt_re_get_port_immutable(struct ib_device
*ibdev
, u8 port_num
,
334 struct ib_port_immutable
*immutable
)
336 struct ib_port_attr port_attr
;
338 if (bnxt_re_query_port(ibdev
, port_num
, &port_attr
))
341 immutable
->pkey_tbl_len
= port_attr
.pkey_tbl_len
;
342 immutable
->gid_tbl_len
= port_attr
.gid_tbl_len
;
343 immutable
->core_cap_flags
= RDMA_CORE_PORT_IBA_ROCE
;
344 immutable
->core_cap_flags
|= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
;
345 immutable
->max_mad_size
= IB_MGMT_MAD_SIZE
;
349 int bnxt_re_query_pkey(struct ib_device
*ibdev
, u8 port_num
,
350 u16 index
, u16
*pkey
)
352 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
354 /* Ignore port_num */
356 memset(pkey
, 0, sizeof(*pkey
));
357 return bnxt_qplib_get_pkey(&rdev
->qplib_res
,
358 &rdev
->qplib_res
.pkey_tbl
, index
, pkey
);
361 int bnxt_re_query_gid(struct ib_device
*ibdev
, u8 port_num
,
362 int index
, union ib_gid
*gid
)
364 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
367 /* Ignore port_num */
368 memset(gid
, 0, sizeof(*gid
));
369 rc
= bnxt_qplib_get_sgid(&rdev
->qplib_res
,
370 &rdev
->qplib_res
.sgid_tbl
, index
,
371 (struct bnxt_qplib_gid
*)gid
);
375 int bnxt_re_del_gid(struct ib_device
*ibdev
, u8 port_num
,
376 unsigned int index
, void **context
)
379 struct bnxt_re_gid_ctx
*ctx
, **ctx_tbl
;
380 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
381 struct bnxt_qplib_sgid_tbl
*sgid_tbl
= &rdev
->qplib_res
.sgid_tbl
;
383 /* Delete the entry from the hardware */
388 if (sgid_tbl
&& sgid_tbl
->active
) {
389 if (ctx
->idx
>= sgid_tbl
->max
)
393 rc
= bnxt_qplib_del_sgid
395 &sgid_tbl
->tbl
[ctx
->idx
], true);
397 dev_err(rdev_to_dev(rdev
),
398 "Failed to remove GID: %#x", rc
);
399 ctx_tbl
= sgid_tbl
->ctx
;
400 ctx_tbl
[ctx
->idx
] = NULL
;
409 int bnxt_re_add_gid(struct ib_device
*ibdev
, u8 port_num
,
410 unsigned int index
, const union ib_gid
*gid
,
411 const struct ib_gid_attr
*attr
, void **context
)
415 u16 vlan_id
= 0xFFFF;
416 struct bnxt_re_gid_ctx
*ctx
, **ctx_tbl
;
417 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
418 struct bnxt_qplib_sgid_tbl
*sgid_tbl
= &rdev
->qplib_res
.sgid_tbl
;
420 if ((attr
->ndev
) && is_vlan_dev(attr
->ndev
))
421 vlan_id
= vlan_dev_vlan_id(attr
->ndev
);
423 rc
= bnxt_qplib_add_sgid(sgid_tbl
, (struct bnxt_qplib_gid
*)gid
,
424 rdev
->qplib_res
.netdev
->dev_addr
,
425 vlan_id
, true, &tbl_idx
);
426 if (rc
== -EALREADY
) {
427 ctx_tbl
= sgid_tbl
->ctx
;
428 ctx_tbl
[tbl_idx
]->refcnt
++;
429 *context
= ctx_tbl
[tbl_idx
];
434 dev_err(rdev_to_dev(rdev
), "Failed to add GID: %#x", rc
);
438 ctx
= kmalloc(sizeof(*ctx
), GFP_KERNEL
);
441 ctx_tbl
= sgid_tbl
->ctx
;
444 ctx_tbl
[tbl_idx
] = ctx
;
449 enum rdma_link_layer
bnxt_re_get_link_layer(struct ib_device
*ibdev
,
452 return IB_LINK_LAYER_ETHERNET
;
455 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
457 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd
*pd
)
459 struct bnxt_re_fence_data
*fence
= &pd
->fence
;
460 struct ib_mr
*ib_mr
= &fence
->mr
->ib_mr
;
461 struct bnxt_qplib_swqe
*wqe
= &fence
->bind_wqe
;
463 memset(wqe
, 0, sizeof(*wqe
));
464 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_BIND_MW
;
465 wqe
->wr_id
= BNXT_QPLIB_FENCE_WRID
;
466 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP
;
467 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE
;
468 wqe
->bind
.zero_based
= false;
469 wqe
->bind
.parent_l_key
= ib_mr
->lkey
;
470 wqe
->bind
.va
= (u64
)(unsigned long)fence
->va
;
471 wqe
->bind
.length
= fence
->size
;
472 wqe
->bind
.access_cntl
= __from_ib_access_flags(IB_ACCESS_REMOTE_READ
);
473 wqe
->bind
.mw_type
= SQ_BIND_MW_TYPE_TYPE1
;
475 /* Save the initial rkey in fence structure for now;
476 * wqe->bind.r_key will be set at (re)bind time.
478 fence
->bind_rkey
= ib_inc_rkey(fence
->mw
->rkey
);
481 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp
*qplib_qp
)
483 struct bnxt_re_qp
*qp
= container_of(qplib_qp
, struct bnxt_re_qp
,
485 struct ib_pd
*ib_pd
= qp
->ib_qp
.pd
;
486 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
487 struct bnxt_re_fence_data
*fence
= &pd
->fence
;
488 struct bnxt_qplib_swqe
*fence_wqe
= &fence
->bind_wqe
;
489 struct bnxt_qplib_swqe wqe
;
492 memcpy(&wqe
, fence_wqe
, sizeof(wqe
));
493 wqe
.bind
.r_key
= fence
->bind_rkey
;
494 fence
->bind_rkey
= ib_inc_rkey(fence
->bind_rkey
);
496 dev_dbg(rdev_to_dev(qp
->rdev
),
497 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
498 wqe
.bind
.r_key
, qp
->qplib_qp
.id
, pd
);
499 rc
= bnxt_qplib_post_send(&qp
->qplib_qp
, &wqe
);
501 dev_err(rdev_to_dev(qp
->rdev
), "Failed to bind fence-WQE\n");
504 bnxt_qplib_post_send_db(&qp
->qplib_qp
);
509 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd
*pd
)
511 struct bnxt_re_fence_data
*fence
= &pd
->fence
;
512 struct bnxt_re_dev
*rdev
= pd
->rdev
;
513 struct device
*dev
= &rdev
->en_dev
->pdev
->dev
;
514 struct bnxt_re_mr
*mr
= fence
->mr
;
517 bnxt_re_dealloc_mw(fence
->mw
);
522 bnxt_qplib_dereg_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
,
525 bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
529 if (fence
->dma_addr
) {
530 dma_unmap_single(dev
, fence
->dma_addr
, BNXT_RE_FENCE_BYTES
,
536 static int bnxt_re_create_fence_mr(struct bnxt_re_pd
*pd
)
538 int mr_access_flags
= IB_ACCESS_LOCAL_WRITE
| IB_ACCESS_MW_BIND
;
539 struct bnxt_re_fence_data
*fence
= &pd
->fence
;
540 struct bnxt_re_dev
*rdev
= pd
->rdev
;
541 struct device
*dev
= &rdev
->en_dev
->pdev
->dev
;
542 struct bnxt_re_mr
*mr
= NULL
;
543 dma_addr_t dma_addr
= 0;
548 dma_addr
= dma_map_single(dev
, fence
->va
, BNXT_RE_FENCE_BYTES
,
550 rc
= dma_mapping_error(dev
, dma_addr
);
552 dev_err(rdev_to_dev(rdev
), "Failed to dma-map fence-MR-mem\n");
557 fence
->dma_addr
= dma_addr
;
560 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
567 mr
->qplib_mr
.pd
= &pd
->qplib_pd
;
568 mr
->qplib_mr
.type
= CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR
;
569 mr
->qplib_mr
.flags
= __from_ib_access_flags(mr_access_flags
);
570 rc
= bnxt_qplib_alloc_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
572 dev_err(rdev_to_dev(rdev
), "Failed to alloc fence-HW-MR\n");
577 mr
->ib_mr
.lkey
= mr
->qplib_mr
.lkey
;
578 mr
->qplib_mr
.va
= (u64
)(unsigned long)fence
->va
;
579 mr
->qplib_mr
.total_size
= BNXT_RE_FENCE_BYTES
;
581 rc
= bnxt_qplib_reg_mr(&rdev
->qplib_res
, &mr
->qplib_mr
, &pbl_tbl
,
582 BNXT_RE_FENCE_PBL_SIZE
, false);
584 dev_err(rdev_to_dev(rdev
), "Failed to register fence-MR\n");
587 mr
->ib_mr
.rkey
= mr
->qplib_mr
.rkey
;
589 /* Create a fence MW only for kernel consumers */
590 mw
= bnxt_re_alloc_mw(&pd
->ib_pd
, IB_MW_TYPE_1
, NULL
);
592 dev_err(rdev_to_dev(rdev
),
593 "Failed to create fence-MW for PD: %p\n", pd
);
599 bnxt_re_create_fence_wqe(pd
);
603 bnxt_re_destroy_fence_mr(pd
);
607 /* Protection Domains */
608 int bnxt_re_dealloc_pd(struct ib_pd
*ib_pd
)
610 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
611 struct bnxt_re_dev
*rdev
= pd
->rdev
;
614 bnxt_re_destroy_fence_mr(pd
);
615 if (ib_pd
->uobject
&& pd
->dpi
.dbr
) {
616 struct ib_ucontext
*ib_uctx
= ib_pd
->uobject
->context
;
617 struct bnxt_re_ucontext
*ucntx
;
619 /* Free DPI only if this is the first PD allocated by the
620 * application and mark the context dpi as NULL
622 ucntx
= container_of(ib_uctx
, struct bnxt_re_ucontext
, ib_uctx
);
624 rc
= bnxt_qplib_dealloc_dpi(&rdev
->qplib_res
,
625 &rdev
->qplib_res
.dpi_tbl
,
628 dev_err(rdev_to_dev(rdev
), "Failed to deallocate HW DPI");
629 /* Don't fail, continue*/
633 rc
= bnxt_qplib_dealloc_pd(&rdev
->qplib_res
,
634 &rdev
->qplib_res
.pd_tbl
,
637 dev_err(rdev_to_dev(rdev
), "Failed to deallocate HW PD");
645 struct ib_pd
*bnxt_re_alloc_pd(struct ib_device
*ibdev
,
646 struct ib_ucontext
*ucontext
,
647 struct ib_udata
*udata
)
649 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
650 struct bnxt_re_ucontext
*ucntx
= container_of(ucontext
,
651 struct bnxt_re_ucontext
,
653 struct bnxt_re_pd
*pd
;
656 pd
= kzalloc(sizeof(*pd
), GFP_KERNEL
);
658 return ERR_PTR(-ENOMEM
);
661 if (bnxt_qplib_alloc_pd(&rdev
->qplib_res
.pd_tbl
, &pd
->qplib_pd
)) {
662 dev_err(rdev_to_dev(rdev
), "Failed to allocate HW PD");
668 struct bnxt_re_pd_resp resp
;
671 /* Allocate DPI in alloc_pd to avoid failing of
672 * ibv_devinfo and family of application when DPIs
675 if (bnxt_qplib_alloc_dpi(&rdev
->qplib_res
.dpi_tbl
,
680 ucntx
->dpi
= &pd
->dpi
;
683 resp
.pdid
= pd
->qplib_pd
.id
;
684 /* Still allow mapping this DBR to the new user PD. */
685 resp
.dpi
= ucntx
->dpi
->dpi
;
686 resp
.dbr
= (u64
)ucntx
->dpi
->umdbr
;
688 rc
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
690 dev_err(rdev_to_dev(rdev
),
691 "Failed to copy user response\n");
697 if (bnxt_re_create_fence_mr(pd
))
698 dev_warn(rdev_to_dev(rdev
),
699 "Failed to create Fence-MR\n");
702 (void)bnxt_qplib_dealloc_pd(&rdev
->qplib_res
, &rdev
->qplib_res
.pd_tbl
,
709 /* Address Handles */
710 int bnxt_re_destroy_ah(struct ib_ah
*ib_ah
)
712 struct bnxt_re_ah
*ah
= container_of(ib_ah
, struct bnxt_re_ah
, ib_ah
);
713 struct bnxt_re_dev
*rdev
= ah
->rdev
;
716 rc
= bnxt_qplib_destroy_ah(&rdev
->qplib_res
, &ah
->qplib_ah
);
718 dev_err(rdev_to_dev(rdev
), "Failed to destroy HW AH");
725 struct ib_ah
*bnxt_re_create_ah(struct ib_pd
*ib_pd
,
726 struct rdma_ah_attr
*ah_attr
,
727 struct ib_udata
*udata
)
729 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
730 struct bnxt_re_dev
*rdev
= pd
->rdev
;
731 struct bnxt_re_ah
*ah
;
732 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah_attr
);
737 struct ib_gid_attr sgid_attr
;
739 if (!(rdma_ah_get_ah_flags(ah_attr
) & IB_AH_GRH
)) {
740 dev_err(rdev_to_dev(rdev
), "Failed to alloc AH: GRH not set");
741 return ERR_PTR(-EINVAL
);
743 ah
= kzalloc(sizeof(*ah
), GFP_ATOMIC
);
745 return ERR_PTR(-ENOMEM
);
748 ah
->qplib_ah
.pd
= &pd
->qplib_pd
;
750 /* Supply the configuration for the HW */
751 memcpy(ah
->qplib_ah
.dgid
.data
, grh
->dgid
.raw
,
752 sizeof(union ib_gid
));
754 * If RoCE V2 is enabled, stack will have two entries for
755 * each GID entry. Avoiding this duplicte entry in HW. Dividing
756 * the GID index by 2 for RoCE V2
758 ah
->qplib_ah
.sgid_index
= grh
->sgid_index
/ 2;
759 ah
->qplib_ah
.host_sgid_index
= grh
->sgid_index
;
760 ah
->qplib_ah
.traffic_class
= grh
->traffic_class
;
761 ah
->qplib_ah
.flow_label
= grh
->flow_label
;
762 ah
->qplib_ah
.hop_limit
= grh
->hop_limit
;
763 ah
->qplib_ah
.sl
= rdma_ah_get_sl(ah_attr
);
764 if (ib_pd
->uobject
&&
765 !rdma_is_multicast_addr((struct in6_addr
*)
767 !rdma_link_local_addr((struct in6_addr
*)
771 rc
= ib_get_cached_gid(&rdev
->ibdev
, 1,
772 grh
->sgid_index
, &sgid
,
775 dev_err(rdev_to_dev(rdev
),
776 "Failed to query gid at index %d",
780 if (sgid_attr
.ndev
) {
781 if (is_vlan_dev(sgid_attr
.ndev
))
782 vlan_tag
= vlan_dev_vlan_id(sgid_attr
.ndev
);
783 dev_put(sgid_attr
.ndev
);
785 /* Get network header type for this GID */
786 nw_type
= ib_gid_to_network_type(sgid_attr
.gid_type
, &sgid
);
788 case RDMA_NETWORK_IPV4
:
789 ah
->qplib_ah
.nw_type
= CMDQ_CREATE_AH_TYPE_V2IPV4
;
791 case RDMA_NETWORK_IPV6
:
792 ah
->qplib_ah
.nw_type
= CMDQ_CREATE_AH_TYPE_V2IPV6
;
795 ah
->qplib_ah
.nw_type
= CMDQ_CREATE_AH_TYPE_V1
;
798 rc
= rdma_addr_find_l2_eth_by_grh(&sgid
, &grh
->dgid
,
799 ah_attr
->roce
.dmac
, &vlan_tag
,
800 &sgid_attr
.ndev
->ifindex
,
803 dev_err(rdev_to_dev(rdev
), "Failed to get dmac\n");
808 memcpy(ah
->qplib_ah
.dmac
, ah_attr
->roce
.dmac
, ETH_ALEN
);
809 rc
= bnxt_qplib_create_ah(&rdev
->qplib_res
, &ah
->qplib_ah
);
811 dev_err(rdev_to_dev(rdev
), "Failed to allocate HW AH");
815 /* Write AVID to shared page. */
816 if (ib_pd
->uobject
) {
817 struct ib_ucontext
*ib_uctx
= ib_pd
->uobject
->context
;
818 struct bnxt_re_ucontext
*uctx
;
822 uctx
= container_of(ib_uctx
, struct bnxt_re_ucontext
, ib_uctx
);
823 spin_lock_irqsave(&uctx
->sh_lock
, flag
);
824 wrptr
= (u32
*)(uctx
->shpg
+ BNXT_RE_AVID_OFFT
);
825 *wrptr
= ah
->qplib_ah
.id
;
826 wmb(); /* make sure cache is updated. */
827 spin_unlock_irqrestore(&uctx
->sh_lock
, flag
);
837 int bnxt_re_modify_ah(struct ib_ah
*ib_ah
, struct rdma_ah_attr
*ah_attr
)
842 int bnxt_re_query_ah(struct ib_ah
*ib_ah
, struct rdma_ah_attr
*ah_attr
)
844 struct bnxt_re_ah
*ah
= container_of(ib_ah
, struct bnxt_re_ah
, ib_ah
);
846 ah_attr
->type
= ib_ah
->type
;
847 rdma_ah_set_sl(ah_attr
, ah
->qplib_ah
.sl
);
848 memcpy(ah_attr
->roce
.dmac
, ah
->qplib_ah
.dmac
, ETH_ALEN
);
849 rdma_ah_set_grh(ah_attr
, NULL
, 0,
850 ah
->qplib_ah
.host_sgid_index
,
851 0, ah
->qplib_ah
.traffic_class
);
852 rdma_ah_set_dgid_raw(ah_attr
, ah
->qplib_ah
.dgid
.data
);
853 rdma_ah_set_port_num(ah_attr
, 1);
854 rdma_ah_set_static_rate(ah_attr
, 0);
859 int bnxt_re_destroy_qp(struct ib_qp
*ib_qp
)
861 struct bnxt_re_qp
*qp
= container_of(ib_qp
, struct bnxt_re_qp
, ib_qp
);
862 struct bnxt_re_dev
*rdev
= qp
->rdev
;
865 rc
= bnxt_qplib_destroy_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
867 dev_err(rdev_to_dev(rdev
), "Failed to destroy HW QP");
870 if (ib_qp
->qp_type
== IB_QPT_GSI
&& rdev
->qp1_sqp
) {
871 rc
= bnxt_qplib_destroy_ah(&rdev
->qplib_res
,
872 &rdev
->sqp_ah
->qplib_ah
);
874 dev_err(rdev_to_dev(rdev
),
875 "Failed to destroy HW AH for shadow QP");
879 rc
= bnxt_qplib_destroy_qp(&rdev
->qplib_res
,
880 &rdev
->qp1_sqp
->qplib_qp
);
882 dev_err(rdev_to_dev(rdev
),
883 "Failed to destroy Shadow QP");
886 mutex_lock(&rdev
->qp_lock
);
887 list_del(&rdev
->qp1_sqp
->list
);
888 atomic_dec(&rdev
->qp_count
);
889 mutex_unlock(&rdev
->qp_lock
);
892 kfree(rdev
->qp1_sqp
);
895 if (!IS_ERR_OR_NULL(qp
->rumem
))
896 ib_umem_release(qp
->rumem
);
897 if (!IS_ERR_OR_NULL(qp
->sumem
))
898 ib_umem_release(qp
->sumem
);
900 mutex_lock(&rdev
->qp_lock
);
902 atomic_dec(&rdev
->qp_count
);
903 mutex_unlock(&rdev
->qp_lock
);
908 static u8
__from_ib_qp_type(enum ib_qp_type type
)
912 return CMDQ_CREATE_QP1_TYPE_GSI
;
914 return CMDQ_CREATE_QP_TYPE_RC
;
916 return CMDQ_CREATE_QP_TYPE_UD
;
922 static int bnxt_re_init_user_qp(struct bnxt_re_dev
*rdev
, struct bnxt_re_pd
*pd
,
923 struct bnxt_re_qp
*qp
, struct ib_udata
*udata
)
925 struct bnxt_re_qp_req ureq
;
926 struct bnxt_qplib_qp
*qplib_qp
= &qp
->qplib_qp
;
927 struct ib_umem
*umem
;
929 struct ib_ucontext
*context
= pd
->ib_pd
.uobject
->context
;
930 struct bnxt_re_ucontext
*cntx
= container_of(context
,
931 struct bnxt_re_ucontext
,
933 if (ib_copy_from_udata(&ureq
, udata
, sizeof(ureq
)))
936 bytes
= (qplib_qp
->sq
.max_wqe
* BNXT_QPLIB_MAX_SQE_ENTRY_SIZE
);
937 /* Consider mapping PSN search memory only for RC QPs. */
938 if (qplib_qp
->type
== CMDQ_CREATE_QP_TYPE_RC
)
939 bytes
+= (qplib_qp
->sq
.max_wqe
* sizeof(struct sq_psn_search
));
940 bytes
= PAGE_ALIGN(bytes
);
941 umem
= ib_umem_get(context
, ureq
.qpsva
, bytes
,
942 IB_ACCESS_LOCAL_WRITE
, 1);
944 return PTR_ERR(umem
);
947 qplib_qp
->sq
.sglist
= umem
->sg_head
.sgl
;
948 qplib_qp
->sq
.nmap
= umem
->nmap
;
949 qplib_qp
->qp_handle
= ureq
.qp_handle
;
951 if (!qp
->qplib_qp
.srq
) {
952 bytes
= (qplib_qp
->rq
.max_wqe
* BNXT_QPLIB_MAX_RQE_ENTRY_SIZE
);
953 bytes
= PAGE_ALIGN(bytes
);
954 umem
= ib_umem_get(context
, ureq
.qprva
, bytes
,
955 IB_ACCESS_LOCAL_WRITE
, 1);
959 qplib_qp
->rq
.sglist
= umem
->sg_head
.sgl
;
960 qplib_qp
->rq
.nmap
= umem
->nmap
;
963 qplib_qp
->dpi
= cntx
->dpi
;
966 ib_umem_release(qp
->sumem
);
968 qplib_qp
->sq
.sglist
= NULL
;
969 qplib_qp
->sq
.nmap
= 0;
971 return PTR_ERR(umem
);
974 static struct bnxt_re_ah
*bnxt_re_create_shadow_qp_ah
975 (struct bnxt_re_pd
*pd
,
976 struct bnxt_qplib_res
*qp1_res
,
977 struct bnxt_qplib_qp
*qp1_qp
)
979 struct bnxt_re_dev
*rdev
= pd
->rdev
;
980 struct bnxt_re_ah
*ah
;
984 ah
= kzalloc(sizeof(*ah
), GFP_KERNEL
);
988 memset(ah
, 0, sizeof(*ah
));
990 ah
->qplib_ah
.pd
= &pd
->qplib_pd
;
992 rc
= bnxt_re_query_gid(&rdev
->ibdev
, 1, 0, &sgid
);
996 /* supply the dgid data same as sgid */
997 memcpy(ah
->qplib_ah
.dgid
.data
, &sgid
.raw
,
998 sizeof(union ib_gid
));
999 ah
->qplib_ah
.sgid_index
= 0;
1001 ah
->qplib_ah
.traffic_class
= 0;
1002 ah
->qplib_ah
.flow_label
= 0;
1003 ah
->qplib_ah
.hop_limit
= 1;
1004 ah
->qplib_ah
.sl
= 0;
1005 /* Have DMAC same as SMAC */
1006 ether_addr_copy(ah
->qplib_ah
.dmac
, rdev
->netdev
->dev_addr
);
1008 rc
= bnxt_qplib_create_ah(&rdev
->qplib_res
, &ah
->qplib_ah
);
1010 dev_err(rdev_to_dev(rdev
),
1011 "Failed to allocate HW AH for Shadow QP");
1022 static struct bnxt_re_qp
*bnxt_re_create_shadow_qp
1023 (struct bnxt_re_pd
*pd
,
1024 struct bnxt_qplib_res
*qp1_res
,
1025 struct bnxt_qplib_qp
*qp1_qp
)
1027 struct bnxt_re_dev
*rdev
= pd
->rdev
;
1028 struct bnxt_re_qp
*qp
;
1031 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1035 memset(qp
, 0, sizeof(*qp
));
1038 /* Initialize the shadow QP structure from the QP1 values */
1039 ether_addr_copy(qp
->qplib_qp
.smac
, rdev
->netdev
->dev_addr
);
1041 qp
->qplib_qp
.pd
= &pd
->qplib_pd
;
1042 qp
->qplib_qp
.qp_handle
= (u64
)(unsigned long)(&qp
->qplib_qp
);
1043 qp
->qplib_qp
.type
= IB_QPT_UD
;
1045 qp
->qplib_qp
.max_inline_data
= 0;
1046 qp
->qplib_qp
.sig_type
= true;
1048 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1049 qp
->qplib_qp
.sq
.max_wqe
= qp1_qp
->rq
.max_wqe
;
1050 qp
->qplib_qp
.sq
.max_sge
= 2;
1051 /* Q full delta can be 1 since it is internal QP */
1052 qp
->qplib_qp
.sq
.q_full_delta
= 1;
1054 qp
->qplib_qp
.scq
= qp1_qp
->scq
;
1055 qp
->qplib_qp
.rcq
= qp1_qp
->rcq
;
1057 qp
->qplib_qp
.rq
.max_wqe
= qp1_qp
->rq
.max_wqe
;
1058 qp
->qplib_qp
.rq
.max_sge
= qp1_qp
->rq
.max_sge
;
1059 /* Q full delta can be 1 since it is internal QP */
1060 qp
->qplib_qp
.rq
.q_full_delta
= 1;
1062 qp
->qplib_qp
.mtu
= qp1_qp
->mtu
;
1064 qp
->qplib_qp
.sq_hdr_buf_size
= 0;
1065 qp
->qplib_qp
.rq_hdr_buf_size
= BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6
;
1066 qp
->qplib_qp
.dpi
= &rdev
->dpi_privileged
;
1068 rc
= bnxt_qplib_create_qp(qp1_res
, &qp
->qplib_qp
);
1072 rdev
->sqp_id
= qp
->qplib_qp
.id
;
1074 spin_lock_init(&qp
->sq_lock
);
1075 INIT_LIST_HEAD(&qp
->list
);
1076 mutex_lock(&rdev
->qp_lock
);
1077 list_add_tail(&qp
->list
, &rdev
->qp_list
);
1078 atomic_inc(&rdev
->qp_count
);
1079 mutex_unlock(&rdev
->qp_lock
);
1086 struct ib_qp
*bnxt_re_create_qp(struct ib_pd
*ib_pd
,
1087 struct ib_qp_init_attr
*qp_init_attr
,
1088 struct ib_udata
*udata
)
1090 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
1091 struct bnxt_re_dev
*rdev
= pd
->rdev
;
1092 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
1093 struct bnxt_re_qp
*qp
;
1094 struct bnxt_re_cq
*cq
;
1097 if ((qp_init_attr
->cap
.max_send_wr
> dev_attr
->max_qp_wqes
) ||
1098 (qp_init_attr
->cap
.max_recv_wr
> dev_attr
->max_qp_wqes
) ||
1099 (qp_init_attr
->cap
.max_send_sge
> dev_attr
->max_qp_sges
) ||
1100 (qp_init_attr
->cap
.max_recv_sge
> dev_attr
->max_qp_sges
) ||
1101 (qp_init_attr
->cap
.max_inline_data
> dev_attr
->max_inline_data
))
1102 return ERR_PTR(-EINVAL
);
1104 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1106 return ERR_PTR(-ENOMEM
);
1109 ether_addr_copy(qp
->qplib_qp
.smac
, rdev
->netdev
->dev_addr
);
1110 qp
->qplib_qp
.pd
= &pd
->qplib_pd
;
1111 qp
->qplib_qp
.qp_handle
= (u64
)(unsigned long)(&qp
->qplib_qp
);
1112 qp
->qplib_qp
.type
= __from_ib_qp_type(qp_init_attr
->qp_type
);
1113 if (qp
->qplib_qp
.type
== IB_QPT_MAX
) {
1114 dev_err(rdev_to_dev(rdev
), "QP type 0x%x not supported",
1119 qp
->qplib_qp
.max_inline_data
= qp_init_attr
->cap
.max_inline_data
;
1120 qp
->qplib_qp
.sig_type
= ((qp_init_attr
->sq_sig_type
==
1121 IB_SIGNAL_ALL_WR
) ? true : false);
1123 qp
->qplib_qp
.sq
.max_sge
= qp_init_attr
->cap
.max_send_sge
;
1124 if (qp
->qplib_qp
.sq
.max_sge
> dev_attr
->max_qp_sges
)
1125 qp
->qplib_qp
.sq
.max_sge
= dev_attr
->max_qp_sges
;
1127 if (qp_init_attr
->send_cq
) {
1128 cq
= container_of(qp_init_attr
->send_cq
, struct bnxt_re_cq
,
1131 dev_err(rdev_to_dev(rdev
), "Send CQ not found");
1135 qp
->qplib_qp
.scq
= &cq
->qplib_cq
;
1138 if (qp_init_attr
->recv_cq
) {
1139 cq
= container_of(qp_init_attr
->recv_cq
, struct bnxt_re_cq
,
1142 dev_err(rdev_to_dev(rdev
), "Receive CQ not found");
1146 qp
->qplib_qp
.rcq
= &cq
->qplib_cq
;
1149 if (qp_init_attr
->srq
) {
1150 dev_err(rdev_to_dev(rdev
), "SRQ not supported");
1154 /* Allocate 1 more than what's provided so posting max doesn't
1157 entries
= roundup_pow_of_two(qp_init_attr
->cap
.max_recv_wr
+ 1);
1158 qp
->qplib_qp
.rq
.max_wqe
= min_t(u32
, entries
,
1159 dev_attr
->max_qp_wqes
+ 1);
1161 qp
->qplib_qp
.rq
.q_full_delta
= qp
->qplib_qp
.rq
.max_wqe
-
1162 qp_init_attr
->cap
.max_recv_wr
;
1164 qp
->qplib_qp
.rq
.max_sge
= qp_init_attr
->cap
.max_recv_sge
;
1165 if (qp
->qplib_qp
.rq
.max_sge
> dev_attr
->max_qp_sges
)
1166 qp
->qplib_qp
.rq
.max_sge
= dev_attr
->max_qp_sges
;
1169 qp
->qplib_qp
.mtu
= ib_mtu_enum_to_int(iboe_get_mtu(rdev
->netdev
->mtu
));
1171 if (qp_init_attr
->qp_type
== IB_QPT_GSI
) {
1172 /* Allocate 1 more than what's provided */
1173 entries
= roundup_pow_of_two(qp_init_attr
->cap
.max_send_wr
+ 1);
1174 qp
->qplib_qp
.sq
.max_wqe
= min_t(u32
, entries
,
1175 dev_attr
->max_qp_wqes
+ 1);
1176 qp
->qplib_qp
.sq
.q_full_delta
= qp
->qplib_qp
.sq
.max_wqe
-
1177 qp_init_attr
->cap
.max_send_wr
;
1178 qp
->qplib_qp
.rq
.max_sge
= dev_attr
->max_qp_sges
;
1179 if (qp
->qplib_qp
.rq
.max_sge
> dev_attr
->max_qp_sges
)
1180 qp
->qplib_qp
.rq
.max_sge
= dev_attr
->max_qp_sges
;
1181 qp
->qplib_qp
.sq
.max_sge
++;
1182 if (qp
->qplib_qp
.sq
.max_sge
> dev_attr
->max_qp_sges
)
1183 qp
->qplib_qp
.sq
.max_sge
= dev_attr
->max_qp_sges
;
1185 qp
->qplib_qp
.rq_hdr_buf_size
=
1186 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2
;
1188 qp
->qplib_qp
.sq_hdr_buf_size
=
1189 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2
;
1190 qp
->qplib_qp
.dpi
= &rdev
->dpi_privileged
;
1191 rc
= bnxt_qplib_create_qp1(&rdev
->qplib_res
, &qp
->qplib_qp
);
1193 dev_err(rdev_to_dev(rdev
), "Failed to create HW QP1");
1196 /* Create a shadow QP to handle the QP1 traffic */
1197 rdev
->qp1_sqp
= bnxt_re_create_shadow_qp(pd
, &rdev
->qplib_res
,
1199 if (!rdev
->qp1_sqp
) {
1201 dev_err(rdev_to_dev(rdev
),
1202 "Failed to create Shadow QP for QP1");
1205 rdev
->sqp_ah
= bnxt_re_create_shadow_qp_ah(pd
, &rdev
->qplib_res
,
1207 if (!rdev
->sqp_ah
) {
1208 bnxt_qplib_destroy_qp(&rdev
->qplib_res
,
1209 &rdev
->qp1_sqp
->qplib_qp
);
1211 dev_err(rdev_to_dev(rdev
),
1212 "Failed to create AH entry for ShadowQP");
1217 /* Allocate 128 + 1 more than what's provided */
1218 entries
= roundup_pow_of_two(qp_init_attr
->cap
.max_send_wr
+
1219 BNXT_QPLIB_RESERVED_QP_WRS
+ 1);
1220 qp
->qplib_qp
.sq
.max_wqe
= min_t(u32
, entries
,
1221 dev_attr
->max_qp_wqes
+
1222 BNXT_QPLIB_RESERVED_QP_WRS
+ 1);
1223 qp
->qplib_qp
.sq
.q_full_delta
= BNXT_QPLIB_RESERVED_QP_WRS
+ 1;
1226 * Reserving one slot for Phantom WQE. Application can
1227 * post one extra entry in this case. But allowing this to avoid
1228 * unexpected Queue full condition
1231 qp
->qplib_qp
.sq
.q_full_delta
-= 1;
1233 qp
->qplib_qp
.max_rd_atomic
= dev_attr
->max_qp_rd_atom
;
1234 qp
->qplib_qp
.max_dest_rd_atomic
= dev_attr
->max_qp_init_rd_atom
;
1236 rc
= bnxt_re_init_user_qp(rdev
, pd
, qp
, udata
);
1240 qp
->qplib_qp
.dpi
= &rdev
->dpi_privileged
;
1243 rc
= bnxt_qplib_create_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1245 dev_err(rdev_to_dev(rdev
), "Failed to create HW QP");
1250 qp
->ib_qp
.qp_num
= qp
->qplib_qp
.id
;
1251 spin_lock_init(&qp
->sq_lock
);
1252 spin_lock_init(&qp
->rq_lock
);
1255 struct bnxt_re_qp_resp resp
;
1257 resp
.qpid
= qp
->ib_qp
.qp_num
;
1259 rc
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
1261 dev_err(rdev_to_dev(rdev
), "Failed to copy QP udata");
1265 INIT_LIST_HEAD(&qp
->list
);
1266 mutex_lock(&rdev
->qp_lock
);
1267 list_add_tail(&qp
->list
, &rdev
->qp_list
);
1268 atomic_inc(&rdev
->qp_count
);
1269 mutex_unlock(&rdev
->qp_lock
);
1273 bnxt_qplib_destroy_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1279 static u8
__from_ib_qp_state(enum ib_qp_state state
)
1283 return CMDQ_MODIFY_QP_NEW_STATE_RESET
;
1285 return CMDQ_MODIFY_QP_NEW_STATE_INIT
;
1287 return CMDQ_MODIFY_QP_NEW_STATE_RTR
;
1289 return CMDQ_MODIFY_QP_NEW_STATE_RTS
;
1291 return CMDQ_MODIFY_QP_NEW_STATE_SQD
;
1293 return CMDQ_MODIFY_QP_NEW_STATE_SQE
;
1296 return CMDQ_MODIFY_QP_NEW_STATE_ERR
;
1300 static enum ib_qp_state
__to_ib_qp_state(u8 state
)
1303 case CMDQ_MODIFY_QP_NEW_STATE_RESET
:
1304 return IB_QPS_RESET
;
1305 case CMDQ_MODIFY_QP_NEW_STATE_INIT
:
1307 case CMDQ_MODIFY_QP_NEW_STATE_RTR
:
1309 case CMDQ_MODIFY_QP_NEW_STATE_RTS
:
1311 case CMDQ_MODIFY_QP_NEW_STATE_SQD
:
1313 case CMDQ_MODIFY_QP_NEW_STATE_SQE
:
1315 case CMDQ_MODIFY_QP_NEW_STATE_ERR
:
1321 static u32
__from_ib_mtu(enum ib_mtu mtu
)
1325 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256
;
1327 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512
;
1329 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024
;
1331 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
;
1333 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096
;
1335 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
;
1339 static enum ib_mtu
__to_ib_mtu(u32 mtu
)
1341 switch (mtu
& CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK
) {
1342 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256
:
1344 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512
:
1346 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024
:
1348 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
:
1350 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096
:
1357 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev
*rdev
,
1358 struct bnxt_re_qp
*qp1_qp
,
1361 struct bnxt_re_qp
*qp
= rdev
->qp1_sqp
;
1364 if (qp_attr_mask
& IB_QP_STATE
) {
1365 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_STATE
;
1366 qp
->qplib_qp
.state
= qp1_qp
->qplib_qp
.state
;
1368 if (qp_attr_mask
& IB_QP_PKEY_INDEX
) {
1369 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
;
1370 qp
->qplib_qp
.pkey_index
= qp1_qp
->qplib_qp
.pkey_index
;
1373 if (qp_attr_mask
& IB_QP_QKEY
) {
1374 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY
;
1375 /* Using a Random QKEY */
1376 qp
->qplib_qp
.qkey
= 0x81818181;
1378 if (qp_attr_mask
& IB_QP_SQ_PSN
) {
1379 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN
;
1380 qp
->qplib_qp
.sq
.psn
= qp1_qp
->qplib_qp
.sq
.psn
;
1383 rc
= bnxt_qplib_modify_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1385 dev_err(rdev_to_dev(rdev
),
1386 "Failed to modify Shadow QP for QP1");
1390 int bnxt_re_modify_qp(struct ib_qp
*ib_qp
, struct ib_qp_attr
*qp_attr
,
1391 int qp_attr_mask
, struct ib_udata
*udata
)
1393 struct bnxt_re_qp
*qp
= container_of(ib_qp
, struct bnxt_re_qp
, ib_qp
);
1394 struct bnxt_re_dev
*rdev
= qp
->rdev
;
1395 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
1396 enum ib_qp_state curr_qp_state
, new_qp_state
;
1400 struct ib_gid_attr sgid_attr
;
1403 qp
->qplib_qp
.modify_flags
= 0;
1404 if (qp_attr_mask
& IB_QP_STATE
) {
1405 curr_qp_state
= __to_ib_qp_state(qp
->qplib_qp
.cur_qp_state
);
1406 new_qp_state
= qp_attr
->qp_state
;
1407 if (!ib_modify_qp_is_ok(curr_qp_state
, new_qp_state
,
1408 ib_qp
->qp_type
, qp_attr_mask
,
1409 IB_LINK_LAYER_ETHERNET
)) {
1410 dev_err(rdev_to_dev(rdev
),
1411 "Invalid attribute mask: %#x specified ",
1413 dev_err(rdev_to_dev(rdev
),
1414 "for qpn: %#x type: %#x",
1415 ib_qp
->qp_num
, ib_qp
->qp_type
);
1416 dev_err(rdev_to_dev(rdev
),
1417 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1418 curr_qp_state
, new_qp_state
);
1421 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_STATE
;
1422 qp
->qplib_qp
.state
= __from_ib_qp_state(qp_attr
->qp_state
);
1424 if (qp_attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
) {
1425 qp
->qplib_qp
.modify_flags
|=
1426 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY
;
1427 qp
->qplib_qp
.en_sqd_async_notify
= true;
1429 if (qp_attr_mask
& IB_QP_ACCESS_FLAGS
) {
1430 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS
;
1431 qp
->qplib_qp
.access
=
1432 __from_ib_access_flags(qp_attr
->qp_access_flags
);
1433 /* LOCAL_WRITE access must be set to allow RC receive */
1434 qp
->qplib_qp
.access
|= BNXT_QPLIB_ACCESS_LOCAL_WRITE
;
1436 if (qp_attr_mask
& IB_QP_PKEY_INDEX
) {
1437 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
;
1438 qp
->qplib_qp
.pkey_index
= qp_attr
->pkey_index
;
1440 if (qp_attr_mask
& IB_QP_QKEY
) {
1441 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY
;
1442 qp
->qplib_qp
.qkey
= qp_attr
->qkey
;
1444 if (qp_attr_mask
& IB_QP_AV
) {
1445 const struct ib_global_route
*grh
=
1446 rdma_ah_read_grh(&qp_attr
->ah_attr
);
1448 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_DGID
|
1449 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL
|
1450 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
|
1451 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT
|
1452 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS
|
1453 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC
|
1454 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID
;
1455 memcpy(qp
->qplib_qp
.ah
.dgid
.data
, grh
->dgid
.raw
,
1456 sizeof(qp
->qplib_qp
.ah
.dgid
.data
));
1457 qp
->qplib_qp
.ah
.flow_label
= grh
->flow_label
;
1458 /* If RoCE V2 is enabled, stack will have two entries for
1459 * each GID entry. Avoiding this duplicte entry in HW. Dividing
1460 * the GID index by 2 for RoCE V2
1462 qp
->qplib_qp
.ah
.sgid_index
= grh
->sgid_index
/ 2;
1463 qp
->qplib_qp
.ah
.host_sgid_index
= grh
->sgid_index
;
1464 qp
->qplib_qp
.ah
.hop_limit
= grh
->hop_limit
;
1465 qp
->qplib_qp
.ah
.traffic_class
= grh
->traffic_class
;
1466 qp
->qplib_qp
.ah
.sl
= rdma_ah_get_sl(&qp_attr
->ah_attr
);
1467 ether_addr_copy(qp
->qplib_qp
.ah
.dmac
,
1468 qp_attr
->ah_attr
.roce
.dmac
);
1470 status
= ib_get_cached_gid(&rdev
->ibdev
, 1,
1473 if (!status
&& sgid_attr
.ndev
) {
1474 memcpy(qp
->qplib_qp
.smac
, sgid_attr
.ndev
->dev_addr
,
1476 dev_put(sgid_attr
.ndev
);
1477 nw_type
= ib_gid_to_network_type(sgid_attr
.gid_type
,
1480 case RDMA_NETWORK_IPV4
:
1481 qp
->qplib_qp
.nw_type
=
1482 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4
;
1484 case RDMA_NETWORK_IPV6
:
1485 qp
->qplib_qp
.nw_type
=
1486 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
;
1489 qp
->qplib_qp
.nw_type
=
1490 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1
;
1496 if (qp_attr_mask
& IB_QP_PATH_MTU
) {
1497 qp
->qplib_qp
.modify_flags
|=
1498 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
;
1499 qp
->qplib_qp
.path_mtu
= __from_ib_mtu(qp_attr
->path_mtu
);
1500 } else if (qp_attr
->qp_state
== IB_QPS_RTR
) {
1501 qp
->qplib_qp
.modify_flags
|=
1502 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
;
1503 qp
->qplib_qp
.path_mtu
=
1504 __from_ib_mtu(iboe_get_mtu(rdev
->netdev
->mtu
));
1507 if (qp_attr_mask
& IB_QP_TIMEOUT
) {
1508 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT
;
1509 qp
->qplib_qp
.timeout
= qp_attr
->timeout
;
1511 if (qp_attr_mask
& IB_QP_RETRY_CNT
) {
1512 qp
->qplib_qp
.modify_flags
|=
1513 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT
;
1514 qp
->qplib_qp
.retry_cnt
= qp_attr
->retry_cnt
;
1516 if (qp_attr_mask
& IB_QP_RNR_RETRY
) {
1517 qp
->qplib_qp
.modify_flags
|=
1518 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY
;
1519 qp
->qplib_qp
.rnr_retry
= qp_attr
->rnr_retry
;
1521 if (qp_attr_mask
& IB_QP_MIN_RNR_TIMER
) {
1522 qp
->qplib_qp
.modify_flags
|=
1523 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER
;
1524 qp
->qplib_qp
.min_rnr_timer
= qp_attr
->min_rnr_timer
;
1526 if (qp_attr_mask
& IB_QP_RQ_PSN
) {
1527 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN
;
1528 qp
->qplib_qp
.rq
.psn
= qp_attr
->rq_psn
;
1530 if (qp_attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1531 qp
->qplib_qp
.modify_flags
|=
1532 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC
;
1533 qp
->qplib_qp
.max_rd_atomic
= qp_attr
->max_rd_atomic
;
1535 if (qp_attr_mask
& IB_QP_SQ_PSN
) {
1536 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN
;
1537 qp
->qplib_qp
.sq
.psn
= qp_attr
->sq_psn
;
1539 if (qp_attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1540 qp
->qplib_qp
.modify_flags
|=
1541 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC
;
1542 qp
->qplib_qp
.max_dest_rd_atomic
= qp_attr
->max_dest_rd_atomic
;
1544 if (qp_attr_mask
& IB_QP_CAP
) {
1545 qp
->qplib_qp
.modify_flags
|=
1546 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE
|
1547 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE
|
1548 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE
|
1549 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE
|
1550 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA
;
1551 if ((qp_attr
->cap
.max_send_wr
>= dev_attr
->max_qp_wqes
) ||
1552 (qp_attr
->cap
.max_recv_wr
>= dev_attr
->max_qp_wqes
) ||
1553 (qp_attr
->cap
.max_send_sge
>= dev_attr
->max_qp_sges
) ||
1554 (qp_attr
->cap
.max_recv_sge
>= dev_attr
->max_qp_sges
) ||
1555 (qp_attr
->cap
.max_inline_data
>=
1556 dev_attr
->max_inline_data
)) {
1557 dev_err(rdev_to_dev(rdev
),
1558 "Create QP failed - max exceeded");
1561 entries
= roundup_pow_of_two(qp_attr
->cap
.max_send_wr
);
1562 qp
->qplib_qp
.sq
.max_wqe
= min_t(u32
, entries
,
1563 dev_attr
->max_qp_wqes
+ 1);
1564 qp
->qplib_qp
.sq
.q_full_delta
= qp
->qplib_qp
.sq
.max_wqe
-
1565 qp_attr
->cap
.max_send_wr
;
1567 * Reserving one slot for Phantom WQE. Some application can
1568 * post one extra entry in this case. Allowing this to avoid
1569 * unexpected Queue full condition
1571 qp
->qplib_qp
.sq
.q_full_delta
-= 1;
1572 qp
->qplib_qp
.sq
.max_sge
= qp_attr
->cap
.max_send_sge
;
1573 if (qp
->qplib_qp
.rq
.max_wqe
) {
1574 entries
= roundup_pow_of_two(qp_attr
->cap
.max_recv_wr
);
1575 qp
->qplib_qp
.rq
.max_wqe
=
1576 min_t(u32
, entries
, dev_attr
->max_qp_wqes
+ 1);
1577 qp
->qplib_qp
.rq
.q_full_delta
= qp
->qplib_qp
.rq
.max_wqe
-
1578 qp_attr
->cap
.max_recv_wr
;
1579 qp
->qplib_qp
.rq
.max_sge
= qp_attr
->cap
.max_recv_sge
;
1581 /* SRQ was used prior, just ignore the RQ caps */
1584 if (qp_attr_mask
& IB_QP_DEST_QPN
) {
1585 qp
->qplib_qp
.modify_flags
|=
1586 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID
;
1587 qp
->qplib_qp
.dest_qpn
= qp_attr
->dest_qp_num
;
1589 rc
= bnxt_qplib_modify_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1591 dev_err(rdev_to_dev(rdev
), "Failed to modify HW QP");
1594 if (ib_qp
->qp_type
== IB_QPT_GSI
&& rdev
->qp1_sqp
)
1595 rc
= bnxt_re_modify_shadow_qp(rdev
, qp
, qp_attr_mask
);
1599 int bnxt_re_query_qp(struct ib_qp
*ib_qp
, struct ib_qp_attr
*qp_attr
,
1600 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
1602 struct bnxt_re_qp
*qp
= container_of(ib_qp
, struct bnxt_re_qp
, ib_qp
);
1603 struct bnxt_re_dev
*rdev
= qp
->rdev
;
1604 struct bnxt_qplib_qp qplib_qp
;
1607 memset(&qplib_qp
, 0, sizeof(struct bnxt_qplib_qp
));
1608 qplib_qp
.id
= qp
->qplib_qp
.id
;
1609 qplib_qp
.ah
.host_sgid_index
= qp
->qplib_qp
.ah
.host_sgid_index
;
1611 rc
= bnxt_qplib_query_qp(&rdev
->qplib_res
, &qplib_qp
);
1613 dev_err(rdev_to_dev(rdev
), "Failed to query HW QP");
1616 qp_attr
->qp_state
= __to_ib_qp_state(qplib_qp
.state
);
1617 qp_attr
->en_sqd_async_notify
= qplib_qp
.en_sqd_async_notify
? 1 : 0;
1618 qp_attr
->qp_access_flags
= __to_ib_access_flags(qplib_qp
.access
);
1619 qp_attr
->pkey_index
= qplib_qp
.pkey_index
;
1620 qp_attr
->qkey
= qplib_qp
.qkey
;
1621 qp_attr
->ah_attr
.type
= RDMA_AH_ATTR_TYPE_ROCE
;
1622 rdma_ah_set_grh(&qp_attr
->ah_attr
, NULL
, qplib_qp
.ah
.flow_label
,
1623 qplib_qp
.ah
.host_sgid_index
,
1624 qplib_qp
.ah
.hop_limit
,
1625 qplib_qp
.ah
.traffic_class
);
1626 rdma_ah_set_dgid_raw(&qp_attr
->ah_attr
, qplib_qp
.ah
.dgid
.data
);
1627 rdma_ah_set_sl(&qp_attr
->ah_attr
, qplib_qp
.ah
.sl
);
1628 ether_addr_copy(qp_attr
->ah_attr
.roce
.dmac
, qplib_qp
.ah
.dmac
);
1629 qp_attr
->path_mtu
= __to_ib_mtu(qplib_qp
.path_mtu
);
1630 qp_attr
->timeout
= qplib_qp
.timeout
;
1631 qp_attr
->retry_cnt
= qplib_qp
.retry_cnt
;
1632 qp_attr
->rnr_retry
= qplib_qp
.rnr_retry
;
1633 qp_attr
->min_rnr_timer
= qplib_qp
.min_rnr_timer
;
1634 qp_attr
->rq_psn
= qplib_qp
.rq
.psn
;
1635 qp_attr
->max_rd_atomic
= qplib_qp
.max_rd_atomic
;
1636 qp_attr
->sq_psn
= qplib_qp
.sq
.psn
;
1637 qp_attr
->max_dest_rd_atomic
= qplib_qp
.max_dest_rd_atomic
;
1638 qp_init_attr
->sq_sig_type
= qplib_qp
.sig_type
? IB_SIGNAL_ALL_WR
:
1640 qp_attr
->dest_qp_num
= qplib_qp
.dest_qpn
;
1642 qp_attr
->cap
.max_send_wr
= qp
->qplib_qp
.sq
.max_wqe
;
1643 qp_attr
->cap
.max_send_sge
= qp
->qplib_qp
.sq
.max_sge
;
1644 qp_attr
->cap
.max_recv_wr
= qp
->qplib_qp
.rq
.max_wqe
;
1645 qp_attr
->cap
.max_recv_sge
= qp
->qplib_qp
.rq
.max_sge
;
1646 qp_attr
->cap
.max_inline_data
= qp
->qplib_qp
.max_inline_data
;
1647 qp_init_attr
->cap
= qp_attr
->cap
;
1652 /* Routine for sending QP1 packets for RoCE V1 an V2
1654 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp
*qp
,
1655 struct ib_send_wr
*wr
,
1656 struct bnxt_qplib_swqe
*wqe
,
1659 struct ib_device
*ibdev
= &qp
->rdev
->ibdev
;
1660 struct bnxt_re_ah
*ah
= container_of(ud_wr(wr
)->ah
, struct bnxt_re_ah
,
1662 struct bnxt_qplib_ah
*qplib_ah
= &ah
->qplib_ah
;
1663 struct bnxt_qplib_sge sge
;
1667 struct ib_gid_attr sgid_attr
;
1669 bool is_eth
= false;
1670 bool is_vlan
= false;
1671 bool is_grh
= false;
1672 bool is_udp
= false;
1674 u16 vlan_id
= 0xFFFF;
1676 int i
, rc
= 0, size
;
1678 memset(&qp
->qp1_hdr
, 0, sizeof(qp
->qp1_hdr
));
1680 rc
= ib_get_cached_gid(ibdev
, 1,
1681 qplib_ah
->host_sgid_index
, &sgid
,
1684 dev_err(rdev_to_dev(qp
->rdev
),
1685 "Failed to query gid at index %d",
1686 qplib_ah
->host_sgid_index
);
1689 if (sgid_attr
.ndev
) {
1690 if (is_vlan_dev(sgid_attr
.ndev
))
1691 vlan_id
= vlan_dev_vlan_id(sgid_attr
.ndev
);
1692 dev_put(sgid_attr
.ndev
);
1694 /* Get network header type for this GID */
1695 nw_type
= ib_gid_to_network_type(sgid_attr
.gid_type
, &sgid
);
1697 case RDMA_NETWORK_IPV4
:
1698 nw_type
= BNXT_RE_ROCEV2_IPV4_PACKET
;
1700 case RDMA_NETWORK_IPV6
:
1701 nw_type
= BNXT_RE_ROCEV2_IPV6_PACKET
;
1704 nw_type
= BNXT_RE_ROCE_V1_PACKET
;
1707 memcpy(&dgid
.raw
, &qplib_ah
->dgid
, 16);
1708 is_udp
= sgid_attr
.gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
;
1710 if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid
)) {
1712 ether_type
= ETH_P_IP
;
1715 ether_type
= ETH_P_IPV6
;
1719 ether_type
= ETH_P_IBOE
;
1724 is_vlan
= (vlan_id
&& (vlan_id
< 0x1000)) ? true : false;
1726 ib_ud_header_init(payload_size
, !is_eth
, is_eth
, is_vlan
, is_grh
,
1727 ip_version
, is_udp
, 0, &qp
->qp1_hdr
);
1730 ether_addr_copy(qp
->qp1_hdr
.eth
.dmac_h
, ah
->qplib_ah
.dmac
);
1731 ether_addr_copy(qp
->qp1_hdr
.eth
.smac_h
, qp
->qplib_qp
.smac
);
1733 /* For vlan, check the sgid for vlan existence */
1736 qp
->qp1_hdr
.eth
.type
= cpu_to_be16(ether_type
);
1738 qp
->qp1_hdr
.vlan
.type
= cpu_to_be16(ether_type
);
1739 qp
->qp1_hdr
.vlan
.tag
= cpu_to_be16(vlan_id
);
1742 if (is_grh
|| (ip_version
== 6)) {
1743 memcpy(qp
->qp1_hdr
.grh
.source_gid
.raw
, sgid
.raw
, sizeof(sgid
));
1744 memcpy(qp
->qp1_hdr
.grh
.destination_gid
.raw
, qplib_ah
->dgid
.data
,
1746 qp
->qp1_hdr
.grh
.hop_limit
= qplib_ah
->hop_limit
;
1749 if (ip_version
== 4) {
1750 qp
->qp1_hdr
.ip4
.tos
= 0;
1751 qp
->qp1_hdr
.ip4
.id
= 0;
1752 qp
->qp1_hdr
.ip4
.frag_off
= htons(IP_DF
);
1753 qp
->qp1_hdr
.ip4
.ttl
= qplib_ah
->hop_limit
;
1755 memcpy(&qp
->qp1_hdr
.ip4
.saddr
, sgid
.raw
+ 12, 4);
1756 memcpy(&qp
->qp1_hdr
.ip4
.daddr
, qplib_ah
->dgid
.data
+ 12, 4);
1757 qp
->qp1_hdr
.ip4
.check
= ib_ud_ip4_csum(&qp
->qp1_hdr
);
1761 qp
->qp1_hdr
.udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
1762 qp
->qp1_hdr
.udp
.sport
= htons(0x8CD1);
1763 qp
->qp1_hdr
.udp
.csum
= 0;
1767 if (wr
->opcode
== IB_WR_SEND_WITH_IMM
) {
1768 qp
->qp1_hdr
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE
;
1769 qp
->qp1_hdr
.immediate_present
= 1;
1771 qp
->qp1_hdr
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
1773 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1774 qp
->qp1_hdr
.bth
.solicited_event
= 1;
1776 qp
->qp1_hdr
.bth
.pad_count
= (4 - payload_size
) & 3;
1778 /* P_key for QP1 is for all members */
1779 qp
->qp1_hdr
.bth
.pkey
= cpu_to_be16(0xFFFF);
1780 qp
->qp1_hdr
.bth
.destination_qpn
= IB_QP1
;
1781 qp
->qp1_hdr
.bth
.ack_req
= 0;
1783 qp
->send_psn
&= BTH_PSN_MASK
;
1784 qp
->qp1_hdr
.bth
.psn
= cpu_to_be32(qp
->send_psn
);
1786 /* Use the priviledged Q_Key for QP1 */
1787 qp
->qp1_hdr
.deth
.qkey
= cpu_to_be32(IB_QP1_QKEY
);
1788 qp
->qp1_hdr
.deth
.source_qpn
= IB_QP1
;
1790 /* Pack the QP1 to the transmit buffer */
1791 buf
= bnxt_qplib_get_qp1_sq_buf(&qp
->qplib_qp
, &sge
);
1793 size
= ib_ud_header_pack(&qp
->qp1_hdr
, buf
);
1794 for (i
= wqe
->num_sge
; i
; i
--) {
1795 wqe
->sg_list
[i
].addr
= wqe
->sg_list
[i
- 1].addr
;
1796 wqe
->sg_list
[i
].lkey
= wqe
->sg_list
[i
- 1].lkey
;
1797 wqe
->sg_list
[i
].size
= wqe
->sg_list
[i
- 1].size
;
1801 * Max Header buf size for IPV6 RoCE V2 is 86,
1802 * which is same as the QP1 SQ header buffer.
1803 * Header buf size for IPV4 RoCE V2 can be 66.
1804 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
1805 * Subtract 20 bytes from QP1 SQ header buf size
1807 if (is_udp
&& ip_version
== 4)
1810 * Max Header buf size for RoCE V1 is 78.
1811 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1812 * Subtract 8 bytes from QP1 SQ header buf size
1817 /* Subtract 4 bytes for non vlan packets */
1821 wqe
->sg_list
[0].addr
= sge
.addr
;
1822 wqe
->sg_list
[0].lkey
= sge
.lkey
;
1823 wqe
->sg_list
[0].size
= sge
.size
;
1827 dev_err(rdev_to_dev(qp
->rdev
), "QP1 buffer is empty!");
1833 /* For the MAD layer, it only provides the recv SGE the size of
1834 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
1835 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
1836 * receive packet (334 bytes) with no VLAN and then copy the GRH
1837 * and the MAD datagram out to the provided SGE.
1839 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp
*qp
,
1840 struct ib_recv_wr
*wr
,
1841 struct bnxt_qplib_swqe
*wqe
,
1844 struct bnxt_qplib_sge ref
, sge
;
1846 struct bnxt_re_sqp_entries
*sqp_entry
;
1848 rq_prod_index
= bnxt_qplib_get_rq_prod_index(&qp
->qplib_qp
);
1850 if (!bnxt_qplib_get_qp1_rq_buf(&qp
->qplib_qp
, &sge
))
1853 /* Create 1 SGE to receive the entire
1856 /* Save the reference from ULP */
1857 ref
.addr
= wqe
->sg_list
[0].addr
;
1858 ref
.lkey
= wqe
->sg_list
[0].lkey
;
1859 ref
.size
= wqe
->sg_list
[0].size
;
1861 sqp_entry
= &qp
->rdev
->sqp_tbl
[rq_prod_index
];
1864 wqe
->sg_list
[0].addr
= sge
.addr
;
1865 wqe
->sg_list
[0].lkey
= sge
.lkey
;
1866 wqe
->sg_list
[0].size
= BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2
;
1867 sge
.size
-= wqe
->sg_list
[0].size
;
1869 sqp_entry
->sge
.addr
= ref
.addr
;
1870 sqp_entry
->sge
.lkey
= ref
.lkey
;
1871 sqp_entry
->sge
.size
= ref
.size
;
1872 /* Store the wrid for reporting completion */
1873 sqp_entry
->wrid
= wqe
->wr_id
;
1874 /* change the wqe->wrid to table index */
1875 wqe
->wr_id
= rq_prod_index
;
1879 static int is_ud_qp(struct bnxt_re_qp
*qp
)
1881 return qp
->qplib_qp
.type
== CMDQ_CREATE_QP_TYPE_UD
;
1884 static int bnxt_re_build_send_wqe(struct bnxt_re_qp
*qp
,
1885 struct ib_send_wr
*wr
,
1886 struct bnxt_qplib_swqe
*wqe
)
1888 struct bnxt_re_ah
*ah
= NULL
;
1891 ah
= container_of(ud_wr(wr
)->ah
, struct bnxt_re_ah
, ib_ah
);
1892 wqe
->send
.q_key
= ud_wr(wr
)->remote_qkey
;
1893 wqe
->send
.dst_qp
= ud_wr(wr
)->remote_qpn
;
1894 wqe
->send
.avid
= ah
->qplib_ah
.id
;
1896 switch (wr
->opcode
) {
1898 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_SEND
;
1900 case IB_WR_SEND_WITH_IMM
:
1901 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM
;
1902 wqe
->send
.imm_data
= wr
->ex
.imm_data
;
1904 case IB_WR_SEND_WITH_INV
:
1905 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV
;
1906 wqe
->send
.inv_key
= wr
->ex
.invalidate_rkey
;
1911 if (wr
->send_flags
& IB_SEND_SIGNALED
)
1912 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP
;
1913 if (wr
->send_flags
& IB_SEND_FENCE
)
1914 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE
;
1915 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1916 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT
;
1917 if (wr
->send_flags
& IB_SEND_INLINE
)
1918 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_INLINE
;
1923 static int bnxt_re_build_rdma_wqe(struct ib_send_wr
*wr
,
1924 struct bnxt_qplib_swqe
*wqe
)
1926 switch (wr
->opcode
) {
1927 case IB_WR_RDMA_WRITE
:
1928 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE
;
1930 case IB_WR_RDMA_WRITE_WITH_IMM
:
1931 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM
;
1932 wqe
->rdma
.imm_data
= wr
->ex
.imm_data
;
1934 case IB_WR_RDMA_READ
:
1935 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_RDMA_READ
;
1936 wqe
->rdma
.inv_key
= wr
->ex
.invalidate_rkey
;
1941 wqe
->rdma
.remote_va
= rdma_wr(wr
)->remote_addr
;
1942 wqe
->rdma
.r_key
= rdma_wr(wr
)->rkey
;
1943 if (wr
->send_flags
& IB_SEND_SIGNALED
)
1944 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP
;
1945 if (wr
->send_flags
& IB_SEND_FENCE
)
1946 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE
;
1947 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1948 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT
;
1949 if (wr
->send_flags
& IB_SEND_INLINE
)
1950 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_INLINE
;
1955 static int bnxt_re_build_atomic_wqe(struct ib_send_wr
*wr
,
1956 struct bnxt_qplib_swqe
*wqe
)
1958 switch (wr
->opcode
) {
1959 case IB_WR_ATOMIC_CMP_AND_SWP
:
1960 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP
;
1961 wqe
->atomic
.swap_data
= atomic_wr(wr
)->swap
;
1963 case IB_WR_ATOMIC_FETCH_AND_ADD
:
1964 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD
;
1965 wqe
->atomic
.cmp_data
= atomic_wr(wr
)->compare_add
;
1970 wqe
->atomic
.remote_va
= atomic_wr(wr
)->remote_addr
;
1971 wqe
->atomic
.r_key
= atomic_wr(wr
)->rkey
;
1972 if (wr
->send_flags
& IB_SEND_SIGNALED
)
1973 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP
;
1974 if (wr
->send_flags
& IB_SEND_FENCE
)
1975 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE
;
1976 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1977 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT
;
1981 static int bnxt_re_build_inv_wqe(struct ib_send_wr
*wr
,
1982 struct bnxt_qplib_swqe
*wqe
)
1984 wqe
->type
= BNXT_QPLIB_SWQE_TYPE_LOCAL_INV
;
1985 wqe
->local_inv
.inv_l_key
= wr
->ex
.invalidate_rkey
;
1987 if (wr
->send_flags
& IB_SEND_SIGNALED
)
1988 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP
;
1989 if (wr
->send_flags
& IB_SEND_FENCE
)
1990 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE
;
1991 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1992 wqe
->flags
|= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT
;
static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	if (wr->wr.send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;

	return 0;
}
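
/*
 * The REG_MR WQE built above points the HW at the fast-register page
 * list that was allocated in bnxt_re_alloc_mr() and populated through
 * bnxt_re_map_mr_sg()/bnxt_re_set_page(); only the key, length, iova
 * and access bits are taken from the ib_reg_wr itself.
 */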
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	/* Copy the inline data to the data field */
	u8 *in_data;
	u32 i, sge_len;
	void *sge_addr;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)
				wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_err(rdev_to_dev(rdev),
				"Inline data size requested > supported value");
			return -EINVAL;
		}

		memcpy(in_data, sge_addr, sge_len);
		in_data += wr->sg_list[i].length;
		wqe->inline_len += wr->sg_list[i].length;
	}
	return wqe->inline_len;
}
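
/*
 * Illustrative only (not part of the driver): a ULP whose payload should
 * be copied by bnxt_re_copy_inline_data() simply sets IB_SEND_INLINE and
 * keeps the total SGE length within BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH,
 * roughly:
 *
 *	struct ib_sge sge = { .addr = (uintptr_t)buf, .length = len };
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *
 * For inline WRs the buffer is copied into the WQE at post time, so the
 * buffer does not have to belong to a registered MR and sge.lkey is not
 * used on this path.
 */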
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{
	int payload_sz = 0;

	if (wr->send_flags & IB_SEND_INLINE)
		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
	else
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);

	return payload_sz;
}
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}
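
/*
 * The workaround above issues a dummy modify-QP back to RTS on UD, GSI
 * and raw-Ethertype QPs once every BNXT_RE_UD_QP_HW_STALL posted WQEs,
 * presumably to keep the HW from stalling on long bursts of UD traffic;
 * wqe_cnt is cleared so the next burst re-arms the workaround.
 */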
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_send_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			break;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			break;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Post send failed opcode = %#x rc = %d",
				wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (ib_qp->qp_type == IB_QPT_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			/* Fall thru to build the wqe */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(qp->rdev),
				"RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(qp->rdev),
				"WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"post_send failed op:%#x qps = %#x rc = %d\n",
				wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}
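
/*
 * Illustrative only: from a kernel ULP's point of view this entry point
 * is reached through the usual verbs call, e.g.
 *
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * On failure *bad_wr points at the first WR that could not be posted;
 * everything before it has already been queued, and the SQ doorbell is
 * still rung for those WQEs before the error is returned.
 */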
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;

		wr = wr->next;
	}
	bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}
int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}
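
/*
 * Receive doorbells are batched: the RQ doorbell is rung once every
 * BNXT_RE_RQ_WQE_THRESHOLD posted RQEs inside the loop above, and once
 * more at the end for any remainder, instead of once per WQE.
 */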
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_dev *rdev = cq->rdev;
	int rc;

	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
		return rc;
	}
	if (!IS_ERR_OR_NULL(cq->umem))
		ib_umem_release(cq->umem);

	kfree(cq->cql);
	kfree(cq);
	atomic_dec(&rdev->cq_count);
	return 0;
}
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = NULL;
	int rc, entries;
	int cqe = attr->cqe;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
		return ERR_PTR(-EINVAL);
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	if (context) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = container_of
						(context,
						 struct bnxt_re_ucontext,
						 ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(context, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
		cq->qplib_cq.sghead = NULL;
		cq->qplib_cq.nmap = 0;
	}
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;

	atomic_inc(&rdev->cq_count);

	if (context) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return &cq->ib_cq;

c2fail:
	if (context)
		ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	kfree(cq);
	return ERR_PTR(rc);
}
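
/*
 * CQ creation takes two shapes above: with a user context the CQE ring
 * lives in user memory pinned via ib_umem_get() and the CQ id, tail and
 * phase are returned through udata, while kernel CQs get a kcalloc'd CQL
 * scratch array (bounded by MAX_CQL_PER_POLL) that bnxt_re_poll_cq()
 * uses to pull completions out of the qplib layer.
 */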
static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{
	bool is_udp = false, is_ipv6 = false, is_ipv4 = false;

	/* raweth_qp1_flags Bit 9-6 indicates itype */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		is_udp = true;
		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
		(raweth_qp1_flags2 &
		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
			(is_ipv6 = true) : (is_ipv4 = true);
		return ((is_ipv6) ?
			BNXT_RE_ROCEV2_IPV6_PACKET :
			BNXT_RE_ROCEV2_IPV4_PACKET);
	} else {
		return BNXT_RE_ROCE_V1_PACKET;
	}
}
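
/*
 * Packet-type decode in short: the itype bits in raweth_qp1_flags must
 * indicate RoCE; if the HW calculated both the IP and L4 checksums the
 * frame is RoCEv2 (UDP encapsulated), with bit 8 of raweth_qp1_flags2
 * selecting IPv6 versus IPv4, otherwise it is treated as RoCE v1.
 */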
static int bnxt_re_to_ib_nw_type(int nw_type)
{
	u8 nw_hdr_type = 0xFF;

	switch (nw_type) {
	case BNXT_RE_ROCE_V1_PACKET:
		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case BNXT_RE_ROCEV2_IPV4_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV4;
		break;
	case BNXT_RE_ROCEV2_IPV6_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV6;
		break;
	}
	return nw_hdr_type;
}
static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case ETH_P_IBOE:
			rc = true;
			break;
		case ETH_P_IP:
		case ETH_P_IPV6: {
			u32 len;
			struct udphdr *udp_hdr;

			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
						      sizeof(struct ipv6hdr));
			tmp_buf += sizeof(struct ethhdr) + len;
			udp_hdr = (struct udphdr *)tmp_buf;
			if (ntohs(udp_hdr->dest) ==
			    ROCE_V2_UDP_DPORT)
				rc = true;
			break;
			}
		default:
			break;
		}
	}

	return rc;
}
static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
					 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp1_qp->rdev;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	struct ib_send_wr *swr;
	struct ib_ud_wr udwr;
	struct ib_recv_wr rwr;
	int pkt_type = 0;
	u32 tbl_idx;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	dma_addr_t shrq_hdr_buf_map;
	u32 offset = 0;
	u32 skip_bytes = 0;
	struct ib_sge s_sge[2];
	struct ib_sge r_sge[2];
	int rc;

	memset(&udwr, 0, sizeof(udwr));
	memset(&rwr, 0, sizeof(rwr));
	memset(&s_sge, 0, sizeof(s_sge));
	memset(&r_sge, 0, sizeof(r_sge));

	swr = &udwr.wr;
	tbl_idx = cqe->wr_id;

	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->sqp_tbl[tbl_idx];

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = qp1_qp;

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE . Skip the ether header*/
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				   BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;

	udwr.ah = &rdev->sqp_ah->ib_ah;
	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;

	/* post data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);

	return 0;
}
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_re_qp *qp1_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u32 tbl_idx;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->sqp_tbl[tbl_idx];
	qp1_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &qp1_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}
static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
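
/*
 * send_phantom_wqe() injects an extra WQE (a bind of the driver's fence
 * memory window through bnxt_re_bind_fence_mw()) under the SQ lock when
 * the qplib poll loop requests one via sq->send_phantom; the WQE carries
 * no application wr_id and phantom_wqe_cnt is only debug accounting.
 */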
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(cq->rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of
				((struct bnxt_qplib_qp *)
				 (unsigned long)(cqe->qp_handle),
				 struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (qp->qplib_qp.id ==
				    qp->rdev->qp1_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status) {
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc
								(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ : type 0x%x not handled",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}
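
/*
 * bnxt_re_poll_cq() returns the number of work completions written to
 * the caller's array: the budget starts at min(num_entries, cq->max_cql)
 * and is decremented per transcribed CQE, so "num_entries - budget" is
 * exactly the count the caller should consume.
 */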
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0;

	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBR_DBR_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBR_DBR_TYPE_CQ_ARMSE;

	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

	return 0;
}
/* Memory Regions */
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto free_mr;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
	if (rc)
		goto free_mrw;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}
int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->npages && mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	if (!IS_ERR_OR_NULL(mr->ib_umem))
		ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}
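
/*
 * Illustrative only: the usual fast-registration flow that ends up in
 * bnxt_re_alloc_mr()/bnxt_re_map_mr_sg()/bnxt_re_build_reg_wqe() looks
 * roughly like
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	n  = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr; reg_wr.key = mr->rkey; reg_wr.access = ...;
 *	ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *
 * where ib_map_mr_sg() calls back into bnxt_re_set_page() for every
 * page and the later IB_WR_REG_MR WR is turned into a REG_MR WQE.
 */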
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail_mrw;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW FR page list");
		goto fail_mrw;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr->pages);
	kfree(mr);
	return ERR_PTR(rc);
}
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl, *pbl_tbl_orig;
	int i, umem_pgs, pages, rc;
	struct scatterlist *sg;
	int entry;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mr;
	}
	mr->ib_umem = umem;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
		goto release_umem;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_mrw;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_mrw;
	}
	pbl_tbl_orig = pbl_tbl;

	if (umem->hugetlb) {
		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
		rc = -EFAULT;
		goto fail;
	}

	if (umem->page_shift != PAGE_SHIFT) {
		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
		rc = -EFAULT;
		goto fail;
	}
	/* Map umem buf ptrs to the PBL */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> umem->page_shift;
		for (i = 0; i < pages; i++, pbl_tbl++)
			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
	}
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
			       umem_pgs, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl_orig);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
fail:
	kfree(pbl_tbl_orig);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
release_umem:
	ib_umem_release(umem);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
		ibdev->uverbs_abi_ver);

	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
			BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}
int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);
	kfree(uctx);
	return 0;
}
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");