/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
/* Map IB verbs access flags onto the qplib/firmware representation. */
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}
/* Map qplib/firmware access flags back onto the IB verbs representation. */
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	/* Transcribe the IB SGEs into the qplib format and return the
	 * total payload length.
	 */
	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}
/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */

	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}
int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail it's deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}
int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
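
/*
 * Fence memory-window support (overview of the helpers below):
 * bnxt_re_create_fence_mr() DMA-maps a small per-PD buffer, registers it as a
 * physical MR and allocates a type-1 memory window over it for kernel
 * consumers. bnxt_re_create_fence_wqe() pre-builds a BIND_MW work request
 * carrying the UC_FENCE flag, and bnxt_re_bind_fence_mw() posts a copy of that
 * WQE with a freshly incremented rkey whenever a fence operation is needed on
 * a QP of this PD. bnxt_re_destroy_fence_mr() tears the resources down again.
 */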
#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}
static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}
static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}
/* Protection Domains */
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id)
		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				      &pd->qplib_pd);
}
int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	int rc;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failing of
			 * ibv_devinfo and family of application when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}
/* Address Handles */
void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;

	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
}
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}
int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return 0;
}
int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}
int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}
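
/*
 * bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() take both completion-queue locks of
 * a QP (send CQ first, then the receive CQ when the two differ) with IRQs
 * disabled, so the QP can be cleaned or moved on/off the flush list without
 * racing against completion processing. The __acquires/__releases and
 * __acquire/__release annotations keep sparse aware of the conditionally
 * taken second lock.
 */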
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	unsigned int flags;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			return rc;
	}

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	return 0;
}
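
/*
 * QP type translation: the stack's ib_qp_type is mapped onto the firmware
 * CMDQ QP type below. Types the device does not support fall through to
 * IB_QPT_MAX, which bnxt_re_init_qp_type() later rejects with -EOPNOTSUPP.
 */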
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}
static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;

	return bnxt_re_get_rwqe_size(rsge);
}
static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}
static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
		return -EINVAL;
	/* For gen p4 and gen p5 backward compatibility mode
	 * wqe size is fixed to 128 bytes
	 */
	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}
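
/*
 * For userspace QPs the send and receive rings live in user memory: the
 * virtual addresses supplied in struct bnxt_re_qp_req are pinned with
 * ib_umem_get() and handed to the qplib layer as scatterlists. For RC QPs
 * the SQ mapping is additionally extended by a PSN search area whose size is
 * derived from the WQE count and WQE mode (see bnxt_re_init_user_qp() below).
 */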
905 static int bnxt_re_init_user_qp(struct bnxt_re_dev
*rdev
, struct bnxt_re_pd
*pd
,
906 struct bnxt_re_qp
*qp
, struct ib_udata
*udata
)
908 struct bnxt_qplib_qp
*qplib_qp
;
909 struct bnxt_re_ucontext
*cntx
;
910 struct bnxt_re_qp_req ureq
;
911 int bytes
= 0, psn_sz
;
912 struct ib_umem
*umem
;
915 qplib_qp
= &qp
->qplib_qp
;
916 cntx
= rdma_udata_to_drv_context(udata
, struct bnxt_re_ucontext
,
918 if (ib_copy_from_udata(&ureq
, udata
, sizeof(ureq
)))
921 bytes
= (qplib_qp
->sq
.max_wqe
* qplib_qp
->sq
.wqe_size
);
922 /* Consider mapping PSN search memory only for RC QPs. */
923 if (qplib_qp
->type
== CMDQ_CREATE_QP_TYPE_RC
) {
924 psn_sz
= bnxt_qplib_is_chip_gen_p5(rdev
->chip_ctx
) ?
925 sizeof(struct sq_psn_search_ext
) :
926 sizeof(struct sq_psn_search
);
927 psn_nume
= (qplib_qp
->wqe_mode
== BNXT_QPLIB_WQE_MODE_STATIC
) ?
928 qplib_qp
->sq
.max_wqe
:
929 ((qplib_qp
->sq
.max_wqe
* qplib_qp
->sq
.wqe_size
) /
930 sizeof(struct bnxt_qplib_sge
));
931 bytes
+= (psn_nume
* psn_sz
);
934 bytes
= PAGE_ALIGN(bytes
);
935 umem
= ib_umem_get(&rdev
->ibdev
, ureq
.qpsva
, bytes
,
936 IB_ACCESS_LOCAL_WRITE
);
938 return PTR_ERR(umem
);
941 qplib_qp
->sq
.sg_info
.sghead
= umem
->sg_head
.sgl
;
942 qplib_qp
->sq
.sg_info
.npages
= ib_umem_num_pages(umem
);
943 qplib_qp
->sq
.sg_info
.nmap
= umem
->nmap
;
944 qplib_qp
->sq
.sg_info
.pgsize
= PAGE_SIZE
;
945 qplib_qp
->sq
.sg_info
.pgshft
= PAGE_SHIFT
;
946 qplib_qp
->qp_handle
= ureq
.qp_handle
;
948 if (!qp
->qplib_qp
.srq
) {
949 bytes
= (qplib_qp
->rq
.max_wqe
* qplib_qp
->rq
.wqe_size
);
950 bytes
= PAGE_ALIGN(bytes
);
951 umem
= ib_umem_get(&rdev
->ibdev
, ureq
.qprva
, bytes
,
952 IB_ACCESS_LOCAL_WRITE
);
956 qplib_qp
->rq
.sg_info
.sghead
= umem
->sg_head
.sgl
;
957 qplib_qp
->rq
.sg_info
.npages
= ib_umem_num_pages(umem
);
958 qplib_qp
->rq
.sg_info
.nmap
= umem
->nmap
;
959 qplib_qp
->rq
.sg_info
.pgsize
= PAGE_SIZE
;
960 qplib_qp
->rq
.sg_info
.pgshft
= PAGE_SHIFT
;
963 qplib_qp
->dpi
= &cntx
->dpi
;
966 ib_umem_release(qp
->sumem
);
968 memset(&qplib_qp
->sq
.sg_info
, 0, sizeof(qplib_qp
->sq
.sg_info
));
970 return PTR_ERR(umem
);
973 static struct bnxt_re_ah
*bnxt_re_create_shadow_qp_ah
974 (struct bnxt_re_pd
*pd
,
975 struct bnxt_qplib_res
*qp1_res
,
976 struct bnxt_qplib_qp
*qp1_qp
)
978 struct bnxt_re_dev
*rdev
= pd
->rdev
;
979 struct bnxt_re_ah
*ah
;
983 ah
= kzalloc(sizeof(*ah
), GFP_KERNEL
);
988 ah
->qplib_ah
.pd
= &pd
->qplib_pd
;
990 rc
= bnxt_re_query_gid(&rdev
->ibdev
, 1, 0, &sgid
);
994 /* supply the dgid data same as sgid */
995 memcpy(ah
->qplib_ah
.dgid
.data
, &sgid
.raw
,
996 sizeof(union ib_gid
));
997 ah
->qplib_ah
.sgid_index
= 0;
999 ah
->qplib_ah
.traffic_class
= 0;
1000 ah
->qplib_ah
.flow_label
= 0;
1001 ah
->qplib_ah
.hop_limit
= 1;
1002 ah
->qplib_ah
.sl
= 0;
1003 /* Have DMAC same as SMAC */
1004 ether_addr_copy(ah
->qplib_ah
.dmac
, rdev
->netdev
->dev_addr
);
1006 rc
= bnxt_qplib_create_ah(&rdev
->qplib_res
, &ah
->qplib_ah
, false);
1008 ibdev_err(&rdev
->ibdev
,
1009 "Failed to allocate HW AH for Shadow QP");
1020 static struct bnxt_re_qp
*bnxt_re_create_shadow_qp
1021 (struct bnxt_re_pd
*pd
,
1022 struct bnxt_qplib_res
*qp1_res
,
1023 struct bnxt_qplib_qp
*qp1_qp
)
1025 struct bnxt_re_dev
*rdev
= pd
->rdev
;
1026 struct bnxt_re_qp
*qp
;
1029 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1035 /* Initialize the shadow QP structure from the QP1 values */
1036 ether_addr_copy(qp
->qplib_qp
.smac
, rdev
->netdev
->dev_addr
);
1038 qp
->qplib_qp
.pd
= &pd
->qplib_pd
;
1039 qp
->qplib_qp
.qp_handle
= (u64
)(unsigned long)(&qp
->qplib_qp
);
1040 qp
->qplib_qp
.type
= IB_QPT_UD
;
1042 qp
->qplib_qp
.max_inline_data
= 0;
1043 qp
->qplib_qp
.sig_type
= true;
1045 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1046 qp
->qplib_qp
.sq
.wqe_size
= bnxt_re_get_wqe_size(0, 6);
1047 qp
->qplib_qp
.sq
.max_wqe
= qp1_qp
->rq
.max_wqe
;
1048 qp
->qplib_qp
.sq
.max_sge
= 2;
1049 /* Q full delta can be 1 since it is internal QP */
1050 qp
->qplib_qp
.sq
.q_full_delta
= 1;
1051 qp
->qplib_qp
.sq
.sg_info
.pgsize
= PAGE_SIZE
;
1052 qp
->qplib_qp
.sq
.sg_info
.pgshft
= PAGE_SHIFT
;
1054 qp
->qplib_qp
.scq
= qp1_qp
->scq
;
1055 qp
->qplib_qp
.rcq
= qp1_qp
->rcq
;
1057 qp
->qplib_qp
.rq
.wqe_size
= bnxt_re_get_rwqe_size(6);
1058 qp
->qplib_qp
.rq
.max_wqe
= qp1_qp
->rq
.max_wqe
;
1059 qp
->qplib_qp
.rq
.max_sge
= qp1_qp
->rq
.max_sge
;
1060 /* Q full delta can be 1 since it is internal QP */
1061 qp
->qplib_qp
.rq
.q_full_delta
= 1;
1062 qp
->qplib_qp
.rq
.sg_info
.pgsize
= PAGE_SIZE
;
1063 qp
->qplib_qp
.rq
.sg_info
.pgshft
= PAGE_SHIFT
;
1065 qp
->qplib_qp
.mtu
= qp1_qp
->mtu
;
1067 qp
->qplib_qp
.sq_hdr_buf_size
= 0;
1068 qp
->qplib_qp
.rq_hdr_buf_size
= BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6
;
1069 qp
->qplib_qp
.dpi
= &rdev
->dpi_privileged
;
1071 rc
= bnxt_qplib_create_qp(qp1_res
, &qp
->qplib_qp
);
1075 spin_lock_init(&qp
->sq_lock
);
1076 INIT_LIST_HEAD(&qp
->list
);
1077 mutex_lock(&rdev
->qp_lock
);
1078 list_add_tail(&qp
->list
, &rdev
->qp_list
);
1079 atomic_inc(&rdev
->qp_count
);
1080 mutex_unlock(&rdev
->qp_lock
);
1087 static int bnxt_re_init_rq_attr(struct bnxt_re_qp
*qp
,
1088 struct ib_qp_init_attr
*init_attr
)
1090 struct bnxt_qplib_dev_attr
*dev_attr
;
1091 struct bnxt_qplib_qp
*qplqp
;
1092 struct bnxt_re_dev
*rdev
;
1093 struct bnxt_qplib_q
*rq
;
1097 qplqp
= &qp
->qplib_qp
;
1099 dev_attr
= &rdev
->dev_attr
;
1101 if (init_attr
->srq
) {
1102 struct bnxt_re_srq
*srq
;
1104 srq
= container_of(init_attr
->srq
, struct bnxt_re_srq
, ib_srq
);
1106 ibdev_err(&rdev
->ibdev
, "SRQ not found");
1109 qplqp
->srq
= &srq
->qplib_srq
;
1112 rq
->max_sge
= init_attr
->cap
.max_recv_sge
;
1113 if (rq
->max_sge
> dev_attr
->max_qp_sges
)
1114 rq
->max_sge
= dev_attr
->max_qp_sges
;
1115 init_attr
->cap
.max_recv_sge
= rq
->max_sge
;
1116 rq
->wqe_size
= bnxt_re_setup_rwqe_size(qplqp
, rq
->max_sge
,
1117 dev_attr
->max_qp_sges
);
1118 /* Allocate 1 more than what's provided so posting max doesn't
1121 entries
= roundup_pow_of_two(init_attr
->cap
.max_recv_wr
+ 1);
1122 rq
->max_wqe
= min_t(u32
, entries
, dev_attr
->max_qp_wqes
+ 1);
1123 rq
->q_full_delta
= 0;
1124 rq
->sg_info
.pgsize
= PAGE_SIZE
;
1125 rq
->sg_info
.pgshft
= PAGE_SHIFT
;
1131 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp
*qp
)
1133 struct bnxt_qplib_dev_attr
*dev_attr
;
1134 struct bnxt_qplib_qp
*qplqp
;
1135 struct bnxt_re_dev
*rdev
;
1138 qplqp
= &qp
->qplib_qp
;
1139 dev_attr
= &rdev
->dev_attr
;
1141 if (!bnxt_qplib_is_chip_gen_p5(rdev
->chip_ctx
)) {
1142 qplqp
->rq
.max_sge
= dev_attr
->max_qp_sges
;
1143 if (qplqp
->rq
.max_sge
> dev_attr
->max_qp_sges
)
1144 qplqp
->rq
.max_sge
= dev_attr
->max_qp_sges
;
1145 qplqp
->rq
.max_sge
= 6;
1149 static int bnxt_re_init_sq_attr(struct bnxt_re_qp
*qp
,
1150 struct ib_qp_init_attr
*init_attr
,
1151 struct ib_udata
*udata
)
1153 struct bnxt_qplib_dev_attr
*dev_attr
;
1154 struct bnxt_qplib_qp
*qplqp
;
1155 struct bnxt_re_dev
*rdev
;
1156 struct bnxt_qplib_q
*sq
;
1162 qplqp
= &qp
->qplib_qp
;
1164 dev_attr
= &rdev
->dev_attr
;
1166 sq
->max_sge
= init_attr
->cap
.max_send_sge
;
1167 if (sq
->max_sge
> dev_attr
->max_qp_sges
) {
1168 sq
->max_sge
= dev_attr
->max_qp_sges
;
1169 init_attr
->cap
.max_send_sge
= sq
->max_sge
;
1172 rc
= bnxt_re_setup_swqe_size(qp
, init_attr
);
1176 entries
= init_attr
->cap
.max_send_wr
;
1177 /* Allocate 128 + 1 more than what's provided */
1178 diff
= (qplqp
->wqe_mode
== BNXT_QPLIB_WQE_MODE_VARIABLE
) ?
1179 0 : BNXT_QPLIB_RESERVED_QP_WRS
;
1180 entries
= roundup_pow_of_two(entries
+ diff
+ 1);
1181 sq
->max_wqe
= min_t(u32
, entries
, dev_attr
->max_qp_wqes
+ diff
+ 1);
1182 sq
->q_full_delta
= diff
+ 1;
1184 * Reserving one slot for Phantom WQE. Application can
1185 * post one extra entry in this case. But allowing this to avoid
1186 * unexpected Queue full condition
1188 qplqp
->sq
.q_full_delta
-= 1;
1189 qplqp
->sq
.sg_info
.pgsize
= PAGE_SIZE
;
1190 qplqp
->sq
.sg_info
.pgshft
= PAGE_SHIFT
;
1195 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp
*qp
,
1196 struct ib_qp_init_attr
*init_attr
)
1198 struct bnxt_qplib_dev_attr
*dev_attr
;
1199 struct bnxt_qplib_qp
*qplqp
;
1200 struct bnxt_re_dev
*rdev
;
1204 qplqp
= &qp
->qplib_qp
;
1205 dev_attr
= &rdev
->dev_attr
;
1207 if (!bnxt_qplib_is_chip_gen_p5(rdev
->chip_ctx
)) {
1208 entries
= roundup_pow_of_two(init_attr
->cap
.max_send_wr
+ 1);
1209 qplqp
->sq
.max_wqe
= min_t(u32
, entries
,
1210 dev_attr
->max_qp_wqes
+ 1);
1211 qplqp
->sq
.q_full_delta
= qplqp
->sq
.max_wqe
-
1212 init_attr
->cap
.max_send_wr
;
1213 qplqp
->sq
.max_sge
++; /* Need one extra sge to put UD header */
1214 if (qplqp
->sq
.max_sge
> dev_attr
->max_qp_sges
)
1215 qplqp
->sq
.max_sge
= dev_attr
->max_qp_sges
;
1219 static int bnxt_re_init_qp_type(struct bnxt_re_dev
*rdev
,
1220 struct ib_qp_init_attr
*init_attr
)
1222 struct bnxt_qplib_chip_ctx
*chip_ctx
;
1225 chip_ctx
= rdev
->chip_ctx
;
1227 qptype
= __from_ib_qp_type(init_attr
->qp_type
);
1228 if (qptype
== IB_QPT_MAX
) {
1229 ibdev_err(&rdev
->ibdev
, "QP type 0x%x not supported", qptype
);
1230 qptype
= -EOPNOTSUPP
;
1234 if (bnxt_qplib_is_chip_gen_p5(chip_ctx
) &&
1235 init_attr
->qp_type
== IB_QPT_GSI
)
1236 qptype
= CMDQ_CREATE_QP_TYPE_GSI
;
1241 static int bnxt_re_init_qp_attr(struct bnxt_re_qp
*qp
, struct bnxt_re_pd
*pd
,
1242 struct ib_qp_init_attr
*init_attr
,
1243 struct ib_udata
*udata
)
1245 struct bnxt_qplib_dev_attr
*dev_attr
;
1246 struct bnxt_qplib_qp
*qplqp
;
1247 struct bnxt_re_dev
*rdev
;
1248 struct bnxt_re_cq
*cq
;
1252 qplqp
= &qp
->qplib_qp
;
1253 dev_attr
= &rdev
->dev_attr
;
1255 /* Setup misc params */
1256 ether_addr_copy(qplqp
->smac
, rdev
->netdev
->dev_addr
);
1257 qplqp
->pd
= &pd
->qplib_pd
;
1258 qplqp
->qp_handle
= (u64
)qplqp
;
1259 qplqp
->max_inline_data
= init_attr
->cap
.max_inline_data
;
1260 qplqp
->sig_type
= ((init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
) ?
1262 qptype
= bnxt_re_init_qp_type(rdev
, init_attr
);
1267 qplqp
->type
= (u8
)qptype
;
1268 qplqp
->wqe_mode
= rdev
->chip_ctx
->modes
.wqe_mode
;
1270 if (init_attr
->qp_type
== IB_QPT_RC
) {
1271 qplqp
->max_rd_atomic
= dev_attr
->max_qp_rd_atom
;
1272 qplqp
->max_dest_rd_atomic
= dev_attr
->max_qp_init_rd_atom
;
1274 qplqp
->mtu
= ib_mtu_enum_to_int(iboe_get_mtu(rdev
->netdev
->mtu
));
1275 qplqp
->dpi
= &rdev
->dpi_privileged
; /* Doorbell page */
1276 if (init_attr
->create_flags
)
1277 ibdev_dbg(&rdev
->ibdev
,
1278 "QP create flags 0x%x not supported",
1279 init_attr
->create_flags
);
1282 if (init_attr
->send_cq
) {
1283 cq
= container_of(init_attr
->send_cq
, struct bnxt_re_cq
, ib_cq
);
1285 ibdev_err(&rdev
->ibdev
, "Send CQ not found");
1289 qplqp
->scq
= &cq
->qplib_cq
;
1293 if (init_attr
->recv_cq
) {
1294 cq
= container_of(init_attr
->recv_cq
, struct bnxt_re_cq
, ib_cq
);
1296 ibdev_err(&rdev
->ibdev
, "Receive CQ not found");
1300 qplqp
->rcq
= &cq
->qplib_cq
;
1305 rc
= bnxt_re_init_rq_attr(qp
, init_attr
);
1308 if (init_attr
->qp_type
== IB_QPT_GSI
)
1309 bnxt_re_adjust_gsi_rq_attr(qp
);
1312 rc
= bnxt_re_init_sq_attr(qp
, init_attr
, udata
);
1315 if (init_attr
->qp_type
== IB_QPT_GSI
)
1316 bnxt_re_adjust_gsi_sq_attr(qp
, init_attr
);
1318 if (udata
) /* This will update DPI and qp_handle */
1319 rc
= bnxt_re_init_user_qp(rdev
, pd
, qp
, udata
);
1324 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp
*qp
,
1325 struct bnxt_re_pd
*pd
)
1327 struct bnxt_re_sqp_entries
*sqp_tbl
= NULL
;
1328 struct bnxt_re_dev
*rdev
;
1329 struct bnxt_re_qp
*sqp
;
1330 struct bnxt_re_ah
*sah
;
1334 /* Create a shadow QP to handle the QP1 traffic */
1335 sqp_tbl
= kzalloc(sizeof(*sqp_tbl
) * BNXT_RE_MAX_GSI_SQP_ENTRIES
,
1339 rdev
->gsi_ctx
.sqp_tbl
= sqp_tbl
;
1341 sqp
= bnxt_re_create_shadow_qp(pd
, &rdev
->qplib_res
, &qp
->qplib_qp
);
1344 ibdev_err(&rdev
->ibdev
, "Failed to create Shadow QP for QP1");
1347 rdev
->gsi_ctx
.gsi_sqp
= sqp
;
1351 sah
= bnxt_re_create_shadow_qp_ah(pd
, &rdev
->qplib_res
,
1354 bnxt_qplib_destroy_qp(&rdev
->qplib_res
,
1357 ibdev_err(&rdev
->ibdev
,
1358 "Failed to create AH entry for ShadowQP");
1361 rdev
->gsi_ctx
.gsi_sah
= sah
;
1369 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp
*qp
, struct bnxt_re_pd
*pd
,
1370 struct ib_qp_init_attr
*init_attr
)
1372 struct bnxt_re_dev
*rdev
;
1373 struct bnxt_qplib_qp
*qplqp
;
1377 qplqp
= &qp
->qplib_qp
;
1379 qplqp
->rq_hdr_buf_size
= BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2
;
1380 qplqp
->sq_hdr_buf_size
= BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2
;
1382 rc
= bnxt_qplib_create_qp1(&rdev
->qplib_res
, qplqp
);
1384 ibdev_err(&rdev
->ibdev
, "create HW QP1 failed!");
1388 rc
= bnxt_re_create_shadow_gsi(qp
, pd
);
1393 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev
*rdev
,
1394 struct ib_qp_init_attr
*init_attr
,
1395 struct bnxt_qplib_dev_attr
*dev_attr
)
1399 if (init_attr
->cap
.max_send_wr
> dev_attr
->max_qp_wqes
||
1400 init_attr
->cap
.max_recv_wr
> dev_attr
->max_qp_wqes
||
1401 init_attr
->cap
.max_send_sge
> dev_attr
->max_qp_sges
||
1402 init_attr
->cap
.max_recv_sge
> dev_attr
->max_qp_sges
||
1403 init_attr
->cap
.max_inline_data
> dev_attr
->max_inline_data
) {
1404 ibdev_err(&rdev
->ibdev
,
1405 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1406 init_attr
->cap
.max_send_wr
, dev_attr
->max_qp_wqes
,
1407 init_attr
->cap
.max_recv_wr
, dev_attr
->max_qp_wqes
,
1408 init_attr
->cap
.max_send_sge
, dev_attr
->max_qp_sges
,
1409 init_attr
->cap
.max_recv_sge
, dev_attr
->max_qp_sges
,
1410 init_attr
->cap
.max_inline_data
,
1411 dev_attr
->max_inline_data
);
1417 struct ib_qp
*bnxt_re_create_qp(struct ib_pd
*ib_pd
,
1418 struct ib_qp_init_attr
*qp_init_attr
,
1419 struct ib_udata
*udata
)
1421 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
1422 struct bnxt_re_dev
*rdev
= pd
->rdev
;
1423 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
1424 struct bnxt_re_qp
*qp
;
1427 rc
= bnxt_re_test_qp_limits(rdev
, qp_init_attr
, dev_attr
);
1433 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1439 rc
= bnxt_re_init_qp_attr(qp
, pd
, qp_init_attr
, udata
);
1443 if (qp_init_attr
->qp_type
== IB_QPT_GSI
&&
1444 !(bnxt_qplib_is_chip_gen_p5(rdev
->chip_ctx
))) {
1445 rc
= bnxt_re_create_gsi_qp(qp
, pd
, qp_init_attr
);
1451 rc
= bnxt_qplib_create_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1453 ibdev_err(&rdev
->ibdev
, "Failed to create HW QP");
1457 struct bnxt_re_qp_resp resp
;
1459 resp
.qpid
= qp
->qplib_qp
.id
;
1461 rc
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
1463 ibdev_err(&rdev
->ibdev
, "Failed to copy QP udata");
1469 qp
->ib_qp
.qp_num
= qp
->qplib_qp
.id
;
1470 if (qp_init_attr
->qp_type
== IB_QPT_GSI
)
1471 rdev
->gsi_ctx
.gsi_qp
= qp
;
1472 spin_lock_init(&qp
->sq_lock
);
1473 spin_lock_init(&qp
->rq_lock
);
1474 INIT_LIST_HEAD(&qp
->list
);
1475 mutex_lock(&rdev
->qp_lock
);
1476 list_add_tail(&qp
->list
, &rdev
->qp_list
);
1477 mutex_unlock(&rdev
->qp_lock
);
1478 atomic_inc(&rdev
->qp_count
);
1482 bnxt_qplib_destroy_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1484 ib_umem_release(qp
->rumem
);
1485 ib_umem_release(qp
->sumem
);
1492 static u8
__from_ib_qp_state(enum ib_qp_state state
)
1496 return CMDQ_MODIFY_QP_NEW_STATE_RESET
;
1498 return CMDQ_MODIFY_QP_NEW_STATE_INIT
;
1500 return CMDQ_MODIFY_QP_NEW_STATE_RTR
;
1502 return CMDQ_MODIFY_QP_NEW_STATE_RTS
;
1504 return CMDQ_MODIFY_QP_NEW_STATE_SQD
;
1506 return CMDQ_MODIFY_QP_NEW_STATE_SQE
;
1509 return CMDQ_MODIFY_QP_NEW_STATE_ERR
;
1513 static enum ib_qp_state
__to_ib_qp_state(u8 state
)
1516 case CMDQ_MODIFY_QP_NEW_STATE_RESET
:
1517 return IB_QPS_RESET
;
1518 case CMDQ_MODIFY_QP_NEW_STATE_INIT
:
1520 case CMDQ_MODIFY_QP_NEW_STATE_RTR
:
1522 case CMDQ_MODIFY_QP_NEW_STATE_RTS
:
1524 case CMDQ_MODIFY_QP_NEW_STATE_SQD
:
1526 case CMDQ_MODIFY_QP_NEW_STATE_SQE
:
1528 case CMDQ_MODIFY_QP_NEW_STATE_ERR
:
1534 static u32
__from_ib_mtu(enum ib_mtu mtu
)
1538 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256
;
1540 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512
;
1542 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024
;
1544 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
;
1546 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096
;
1548 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
;
1552 static enum ib_mtu
__to_ib_mtu(u32 mtu
)
1554 switch (mtu
& CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK
) {
1555 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256
:
1557 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512
:
1559 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024
:
1561 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048
:
1563 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096
:
1570 /* Shared Receive Queues */
1571 void bnxt_re_destroy_srq(struct ib_srq
*ib_srq
, struct ib_udata
*udata
)
1573 struct bnxt_re_srq
*srq
= container_of(ib_srq
, struct bnxt_re_srq
,
1575 struct bnxt_re_dev
*rdev
= srq
->rdev
;
1576 struct bnxt_qplib_srq
*qplib_srq
= &srq
->qplib_srq
;
1577 struct bnxt_qplib_nq
*nq
= NULL
;
1580 nq
= qplib_srq
->cq
->nq
;
1581 bnxt_qplib_destroy_srq(&rdev
->qplib_res
, qplib_srq
);
1582 ib_umem_release(srq
->umem
);
1583 atomic_dec(&rdev
->srq_count
);
1588 static int bnxt_re_init_user_srq(struct bnxt_re_dev
*rdev
,
1589 struct bnxt_re_pd
*pd
,
1590 struct bnxt_re_srq
*srq
,
1591 struct ib_udata
*udata
)
1593 struct bnxt_re_srq_req ureq
;
1594 struct bnxt_qplib_srq
*qplib_srq
= &srq
->qplib_srq
;
1595 struct ib_umem
*umem
;
1597 struct bnxt_re_ucontext
*cntx
= rdma_udata_to_drv_context(
1598 udata
, struct bnxt_re_ucontext
, ib_uctx
);
1600 if (ib_copy_from_udata(&ureq
, udata
, sizeof(ureq
)))
1603 bytes
= (qplib_srq
->max_wqe
* qplib_srq
->wqe_size
);
1604 bytes
= PAGE_ALIGN(bytes
);
1605 umem
= ib_umem_get(&rdev
->ibdev
, ureq
.srqva
, bytes
,
1606 IB_ACCESS_LOCAL_WRITE
);
1608 return PTR_ERR(umem
);
1611 qplib_srq
->sg_info
.sghead
= umem
->sg_head
.sgl
;
1612 qplib_srq
->sg_info
.npages
= ib_umem_num_pages(umem
);
1613 qplib_srq
->sg_info
.nmap
= umem
->nmap
;
1614 qplib_srq
->sg_info
.pgsize
= PAGE_SIZE
;
1615 qplib_srq
->sg_info
.pgshft
= PAGE_SHIFT
;
1616 qplib_srq
->srq_handle
= ureq
.srq_handle
;
1617 qplib_srq
->dpi
= &cntx
->dpi
;
1622 int bnxt_re_create_srq(struct ib_srq
*ib_srq
,
1623 struct ib_srq_init_attr
*srq_init_attr
,
1624 struct ib_udata
*udata
)
1626 struct bnxt_qplib_dev_attr
*dev_attr
;
1627 struct bnxt_qplib_nq
*nq
= NULL
;
1628 struct bnxt_re_dev
*rdev
;
1629 struct bnxt_re_srq
*srq
;
1630 struct bnxt_re_pd
*pd
;
1631 struct ib_pd
*ib_pd
;
1635 pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
1637 dev_attr
= &rdev
->dev_attr
;
1638 srq
= container_of(ib_srq
, struct bnxt_re_srq
, ib_srq
);
1640 if (srq_init_attr
->attr
.max_wr
>= dev_attr
->max_srq_wqes
) {
1641 ibdev_err(&rdev
->ibdev
, "Create CQ failed - max exceeded");
1646 if (srq_init_attr
->srq_type
!= IB_SRQT_BASIC
) {
1652 srq
->qplib_srq
.pd
= &pd
->qplib_pd
;
1653 srq
->qplib_srq
.dpi
= &rdev
->dpi_privileged
;
1654 /* Allocate 1 more than what's provided so posting max doesn't
1657 entries
= roundup_pow_of_two(srq_init_attr
->attr
.max_wr
+ 1);
1658 if (entries
> dev_attr
->max_srq_wqes
+ 1)
1659 entries
= dev_attr
->max_srq_wqes
+ 1;
1660 srq
->qplib_srq
.max_wqe
= entries
;
1662 srq
->qplib_srq
.max_sge
= srq_init_attr
->attr
.max_sge
;
1663 srq
->qplib_srq
.wqe_size
=
1664 bnxt_re_get_rwqe_size(srq
->qplib_srq
.max_sge
);
1665 srq
->qplib_srq
.threshold
= srq_init_attr
->attr
.srq_limit
;
1666 srq
->srq_limit
= srq_init_attr
->attr
.srq_limit
;
1667 srq
->qplib_srq
.eventq_hw_ring_id
= rdev
->nq
[0].ring_id
;
1671 rc
= bnxt_re_init_user_srq(rdev
, pd
, srq
, udata
);
1676 rc
= bnxt_qplib_create_srq(&rdev
->qplib_res
, &srq
->qplib_srq
);
1678 ibdev_err(&rdev
->ibdev
, "Create HW SRQ failed!");
1683 struct bnxt_re_srq_resp resp
;
1685 resp
.srqid
= srq
->qplib_srq
.id
;
1686 rc
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
1688 ibdev_err(&rdev
->ibdev
, "SRQ copy to udata failed!");
1689 bnxt_qplib_destroy_srq(&rdev
->qplib_res
,
1696 atomic_inc(&rdev
->srq_count
);
1701 ib_umem_release(srq
->umem
);
1706 int bnxt_re_modify_srq(struct ib_srq
*ib_srq
, struct ib_srq_attr
*srq_attr
,
1707 enum ib_srq_attr_mask srq_attr_mask
,
1708 struct ib_udata
*udata
)
1710 struct bnxt_re_srq
*srq
= container_of(ib_srq
, struct bnxt_re_srq
,
1712 struct bnxt_re_dev
*rdev
= srq
->rdev
;
1715 switch (srq_attr_mask
) {
1717 /* SRQ resize is not supported */
1720 /* Change the SRQ threshold */
1721 if (srq_attr
->srq_limit
> srq
->qplib_srq
.max_wqe
)
1724 srq
->qplib_srq
.threshold
= srq_attr
->srq_limit
;
1725 rc
= bnxt_qplib_modify_srq(&rdev
->qplib_res
, &srq
->qplib_srq
);
1727 ibdev_err(&rdev
->ibdev
, "Modify HW SRQ failed!");
1730 /* On success, update the shadow */
1731 srq
->srq_limit
= srq_attr
->srq_limit
;
1732 /* No need to Build and send response back to udata */
1735 ibdev_err(&rdev
->ibdev
,
1736 "Unsupported srq_attr_mask 0x%x", srq_attr_mask
);
1742 int bnxt_re_query_srq(struct ib_srq
*ib_srq
, struct ib_srq_attr
*srq_attr
)
1744 struct bnxt_re_srq
*srq
= container_of(ib_srq
, struct bnxt_re_srq
,
1746 struct bnxt_re_srq tsrq
;
1747 struct bnxt_re_dev
*rdev
= srq
->rdev
;
1750 /* Get live SRQ attr */
1751 tsrq
.qplib_srq
.id
= srq
->qplib_srq
.id
;
1752 rc
= bnxt_qplib_query_srq(&rdev
->qplib_res
, &tsrq
.qplib_srq
);
1754 ibdev_err(&rdev
->ibdev
, "Query HW SRQ failed!");
1757 srq_attr
->max_wr
= srq
->qplib_srq
.max_wqe
;
1758 srq_attr
->max_sge
= srq
->qplib_srq
.max_sge
;
1759 srq_attr
->srq_limit
= tsrq
.qplib_srq
.threshold
;
1764 int bnxt_re_post_srq_recv(struct ib_srq
*ib_srq
, const struct ib_recv_wr
*wr
,
1765 const struct ib_recv_wr
**bad_wr
)
1767 struct bnxt_re_srq
*srq
= container_of(ib_srq
, struct bnxt_re_srq
,
1769 struct bnxt_qplib_swqe wqe
;
1770 unsigned long flags
;
1773 spin_lock_irqsave(&srq
->lock
, flags
);
1775 /* Transcribe each ib_recv_wr to qplib_swqe */
1776 wqe
.num_sge
= wr
->num_sge
;
1777 bnxt_re_build_sgl(wr
->sg_list
, wqe
.sg_list
, wr
->num_sge
);
1778 wqe
.wr_id
= wr
->wr_id
;
1779 wqe
.type
= BNXT_QPLIB_SWQE_TYPE_RECV
;
1781 rc
= bnxt_qplib_post_srq_recv(&srq
->qplib_srq
, &wqe
);
1788 spin_unlock_irqrestore(&srq
->lock
, flags
);
1792 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev
*rdev
,
1793 struct bnxt_re_qp
*qp1_qp
,
1796 struct bnxt_re_qp
*qp
= rdev
->gsi_ctx
.gsi_sqp
;
1799 if (qp_attr_mask
& IB_QP_STATE
) {
1800 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_STATE
;
1801 qp
->qplib_qp
.state
= qp1_qp
->qplib_qp
.state
;
1803 if (qp_attr_mask
& IB_QP_PKEY_INDEX
) {
1804 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
;
1805 qp
->qplib_qp
.pkey_index
= qp1_qp
->qplib_qp
.pkey_index
;
1808 if (qp_attr_mask
& IB_QP_QKEY
) {
1809 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY
;
1810 /* Using a Random QKEY */
1811 qp
->qplib_qp
.qkey
= 0x81818181;
1813 if (qp_attr_mask
& IB_QP_SQ_PSN
) {
1814 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN
;
1815 qp
->qplib_qp
.sq
.psn
= qp1_qp
->qplib_qp
.sq
.psn
;
1818 rc
= bnxt_qplib_modify_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
1820 ibdev_err(&rdev
->ibdev
, "Failed to modify Shadow QP for QP1");
1824 int bnxt_re_modify_qp(struct ib_qp
*ib_qp
, struct ib_qp_attr
*qp_attr
,
1825 int qp_attr_mask
, struct ib_udata
*udata
)
1827 struct bnxt_re_qp
*qp
= container_of(ib_qp
, struct bnxt_re_qp
, ib_qp
);
1828 struct bnxt_re_dev
*rdev
= qp
->rdev
;
1829 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
1830 enum ib_qp_state curr_qp_state
, new_qp_state
;
1835 qp
->qplib_qp
.modify_flags
= 0;
1836 if (qp_attr_mask
& IB_QP_STATE
) {
1837 curr_qp_state
= __to_ib_qp_state(qp
->qplib_qp
.cur_qp_state
);
1838 new_qp_state
= qp_attr
->qp_state
;
1839 if (!ib_modify_qp_is_ok(curr_qp_state
, new_qp_state
,
1840 ib_qp
->qp_type
, qp_attr_mask
)) {
1841 ibdev_err(&rdev
->ibdev
,
1842 "Invalid attribute mask: %#x specified ",
1844 ibdev_err(&rdev
->ibdev
,
1845 "for qpn: %#x type: %#x",
1846 ib_qp
->qp_num
, ib_qp
->qp_type
);
1847 ibdev_err(&rdev
->ibdev
,
1848 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1849 curr_qp_state
, new_qp_state
);
1852 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_STATE
;
1853 qp
->qplib_qp
.state
= __from_ib_qp_state(qp_attr
->qp_state
);
1856 qp
->qplib_qp
.state
== CMDQ_MODIFY_QP_NEW_STATE_ERR
) {
1857 ibdev_dbg(&rdev
->ibdev
,
1858 "Move QP = %p to flush list\n", qp
);
1859 flags
= bnxt_re_lock_cqs(qp
);
1860 bnxt_qplib_add_flush_qp(&qp
->qplib_qp
);
1861 bnxt_re_unlock_cqs(qp
, flags
);
1864 qp
->qplib_qp
.state
== CMDQ_MODIFY_QP_NEW_STATE_RESET
) {
1865 ibdev_dbg(&rdev
->ibdev
,
1866 "Move QP = %p out of flush list\n", qp
);
1867 flags
= bnxt_re_lock_cqs(qp
);
1868 bnxt_qplib_clean_qp(&qp
->qplib_qp
);
1869 bnxt_re_unlock_cqs(qp
, flags
);
1872 if (qp_attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
) {
1873 qp
->qplib_qp
.modify_flags
|=
1874 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY
;
1875 qp
->qplib_qp
.en_sqd_async_notify
= true;
1877 if (qp_attr_mask
& IB_QP_ACCESS_FLAGS
) {
1878 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS
;
1879 qp
->qplib_qp
.access
=
1880 __from_ib_access_flags(qp_attr
->qp_access_flags
);
1881 /* LOCAL_WRITE access must be set to allow RC receive */
1882 qp
->qplib_qp
.access
|= BNXT_QPLIB_ACCESS_LOCAL_WRITE
;
1883 /* Temp: Set all params on QP as of now */
1884 qp
->qplib_qp
.access
|= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE
;
1885 qp
->qplib_qp
.access
|= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ
;
1887 if (qp_attr_mask
& IB_QP_PKEY_INDEX
) {
1888 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY
;
1889 qp
->qplib_qp
.pkey_index
= qp_attr
->pkey_index
;
1891 if (qp_attr_mask
& IB_QP_QKEY
) {
1892 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY
;
1893 qp
->qplib_qp
.qkey
= qp_attr
->qkey
;
1895 if (qp_attr_mask
& IB_QP_AV
) {
1896 const struct ib_global_route
*grh
=
1897 rdma_ah_read_grh(&qp_attr
->ah_attr
);
1898 const struct ib_gid_attr
*sgid_attr
;
1899 struct bnxt_re_gid_ctx
*ctx
;
1901 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_DGID
|
1902 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL
|
1903 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX
|
1904 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT
|
1905 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS
|
1906 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC
|
1907 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID
;
1908 memcpy(qp
->qplib_qp
.ah
.dgid
.data
, grh
->dgid
.raw
,
1909 sizeof(qp
->qplib_qp
.ah
.dgid
.data
));
1910 qp
->qplib_qp
.ah
.flow_label
= grh
->flow_label
;
1911 sgid_attr
= grh
->sgid_attr
;
1912 /* Get the HW context of the GID. The reference
1913 * of GID table entry is already taken by the caller.
1915 ctx
= rdma_read_gid_hw_context(sgid_attr
);
1916 qp
->qplib_qp
.ah
.sgid_index
= ctx
->idx
;
1917 qp
->qplib_qp
.ah
.host_sgid_index
= grh
->sgid_index
;
1918 qp
->qplib_qp
.ah
.hop_limit
= grh
->hop_limit
;
1919 qp
->qplib_qp
.ah
.traffic_class
= grh
->traffic_class
;
1920 qp
->qplib_qp
.ah
.sl
= rdma_ah_get_sl(&qp_attr
->ah_attr
);
1921 ether_addr_copy(qp
->qplib_qp
.ah
.dmac
,
1922 qp_attr
->ah_attr
.roce
.dmac
);
1924 rc
= rdma_read_gid_l2_fields(sgid_attr
, NULL
,
1925 &qp
->qplib_qp
.smac
[0]);
1929 nw_type
= rdma_gid_attr_network_type(sgid_attr
);
1931 case RDMA_NETWORK_IPV4
:
1932 qp
->qplib_qp
.nw_type
=
1933 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4
;
1935 case RDMA_NETWORK_IPV6
:
1936 qp
->qplib_qp
.nw_type
=
1937 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
;
1940 qp
->qplib_qp
.nw_type
=
1941 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1
;
1946 if (qp_attr_mask
& IB_QP_PATH_MTU
) {
1947 qp
->qplib_qp
.modify_flags
|=
1948 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
;
1949 qp
->qplib_qp
.path_mtu
= __from_ib_mtu(qp_attr
->path_mtu
);
1950 qp
->qplib_qp
.mtu
= ib_mtu_enum_to_int(qp_attr
->path_mtu
);
1951 } else if (qp_attr
->qp_state
== IB_QPS_RTR
) {
1952 qp
->qplib_qp
.modify_flags
|=
1953 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU
;
1954 qp
->qplib_qp
.path_mtu
=
1955 __from_ib_mtu(iboe_get_mtu(rdev
->netdev
->mtu
));
1957 ib_mtu_enum_to_int(iboe_get_mtu(rdev
->netdev
->mtu
));
1960 if (qp_attr_mask
& IB_QP_TIMEOUT
) {
1961 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT
;
1962 qp
->qplib_qp
.timeout
= qp_attr
->timeout
;
1964 if (qp_attr_mask
& IB_QP_RETRY_CNT
) {
1965 qp
->qplib_qp
.modify_flags
|=
1966 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT
;
1967 qp
->qplib_qp
.retry_cnt
= qp_attr
->retry_cnt
;
1969 if (qp_attr_mask
& IB_QP_RNR_RETRY
) {
1970 qp
->qplib_qp
.modify_flags
|=
1971 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY
;
1972 qp
->qplib_qp
.rnr_retry
= qp_attr
->rnr_retry
;
1974 if (qp_attr_mask
& IB_QP_MIN_RNR_TIMER
) {
1975 qp
->qplib_qp
.modify_flags
|=
1976 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER
;
1977 qp
->qplib_qp
.min_rnr_timer
= qp_attr
->min_rnr_timer
;
1979 if (qp_attr_mask
& IB_QP_RQ_PSN
) {
1980 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN
;
1981 qp
->qplib_qp
.rq
.psn
= qp_attr
->rq_psn
;
1983 if (qp_attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
1984 qp
->qplib_qp
.modify_flags
|=
1985 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC
;
1986 /* Cap the max_rd_atomic to device max */
1987 qp
->qplib_qp
.max_rd_atomic
= min_t(u32
, qp_attr
->max_rd_atomic
,
1988 dev_attr
->max_qp_rd_atom
);
1990 if (qp_attr_mask
& IB_QP_SQ_PSN
) {
1991 qp
->qplib_qp
.modify_flags
|= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN
;
1992 qp
->qplib_qp
.sq
.psn
= qp_attr
->sq_psn
;
1994 if (qp_attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
1995 if (qp_attr
->max_dest_rd_atomic
>
1996 dev_attr
->max_qp_init_rd_atom
) {
1997 ibdev_err(&rdev
->ibdev
,
1998 "max_dest_rd_atomic requested%d is > dev_max%d",
1999 qp_attr
->max_dest_rd_atomic
,
2000 dev_attr
->max_qp_init_rd_atom
);
2004 qp
->qplib_qp
.modify_flags
|=
2005 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC
;
2006 qp
->qplib_qp
.max_dest_rd_atomic
= qp_attr
->max_dest_rd_atomic
;
2008 if (qp_attr_mask
& IB_QP_CAP
) {
2009 qp
->qplib_qp
.modify_flags
|=
2010 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE
|
2011 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE
|
2012 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE
|
2013 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE
|
2014 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA
;
2015 if ((qp_attr
->cap
.max_send_wr
>= dev_attr
->max_qp_wqes
) ||
2016 (qp_attr
->cap
.max_recv_wr
>= dev_attr
->max_qp_wqes
) ||
2017 (qp_attr
->cap
.max_send_sge
>= dev_attr
->max_qp_sges
) ||
2018 (qp_attr
->cap
.max_recv_sge
>= dev_attr
->max_qp_sges
) ||
2019 (qp_attr
->cap
.max_inline_data
>=
2020 dev_attr
->max_inline_data
)) {
2021 ibdev_err(&rdev
->ibdev
,
2022 "Create QP failed - max exceeded");
2025 entries
= roundup_pow_of_two(qp_attr
->cap
.max_send_wr
);
2026 qp
->qplib_qp
.sq
.max_wqe
= min_t(u32
, entries
,
2027 dev_attr
->max_qp_wqes
+ 1);
2028 qp
->qplib_qp
.sq
.q_full_delta
= qp
->qplib_qp
.sq
.max_wqe
-
2029 qp_attr
->cap
.max_send_wr
;
2031 * Reserving one slot for Phantom WQE. Some application can
2032 * post one extra entry in this case. Allowing this to avoid
2033 * unexpected Queue full condition
2035 qp
->qplib_qp
.sq
.q_full_delta
-= 1;
2036 qp
->qplib_qp
.sq
.max_sge
= qp_attr
->cap
.max_send_sge
;
2037 if (qp
->qplib_qp
.rq
.max_wqe
) {
2038 entries
= roundup_pow_of_two(qp_attr
->cap
.max_recv_wr
);
2039 qp
->qplib_qp
.rq
.max_wqe
=
2040 min_t(u32
, entries
, dev_attr
->max_qp_wqes
+ 1);
2041 qp
->qplib_qp
.rq
.q_full_delta
= qp
->qplib_qp
.rq
.max_wqe
-
2042 qp_attr
->cap
.max_recv_wr
;
2043 qp
->qplib_qp
.rq
.max_sge
= qp_attr
->cap
.max_recv_sge
;
2045 /* SRQ was used prior, just ignore the RQ caps */
2048 if (qp_attr_mask
& IB_QP_DEST_QPN
) {
2049 qp
->qplib_qp
.modify_flags
|=
2050 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID
;
2051 qp
->qplib_qp
.dest_qpn
= qp_attr
->dest_qp_num
;
2053 rc
= bnxt_qplib_modify_qp(&rdev
->qplib_res
, &qp
->qplib_qp
);
2055 ibdev_err(&rdev
->ibdev
, "Failed to modify HW QP");
2058 if (ib_qp
->qp_type
== IB_QPT_GSI
&& rdev
->gsi_ctx
.gsi_sqp
)
2059 rc
= bnxt_re_modify_shadow_qp(rdev
, qp
, qp_attr_mask
);
int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}
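/*
 * Illustrative sketch (not part of this driver): roughly how a kernel ULP
 * reaches the query path above through the core ib_query_qp() entry point
 * after connection setup. The variable names and the attribute mask chosen
 * here are assumptions for illustration only.
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *	int rc;
 *
 *	rc = ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_CAP, &init_attr);
 *	if (!rc)
 *		pr_info("qp state %d, max_send_wr %u\n",
 *			attr.qp_state, attr.cap.max_send_wr);
 */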
/* Routine for sending QP1 packets for RoCE V1 and V2 */
2123 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp
*qp
,
2124 const struct ib_send_wr
*wr
,
2125 struct bnxt_qplib_swqe
*wqe
,
2128 struct bnxt_re_ah
*ah
= container_of(ud_wr(wr
)->ah
, struct bnxt_re_ah
,
2130 struct bnxt_qplib_ah
*qplib_ah
= &ah
->qplib_ah
;
2131 const struct ib_gid_attr
*sgid_attr
= ah
->ib_ah
.sgid_attr
;
2132 struct bnxt_qplib_sge sge
;
2136 bool is_eth
= false;
2137 bool is_vlan
= false;
2138 bool is_grh
= false;
2139 bool is_udp
= false;
2141 u16 vlan_id
= 0xFFFF;
2145 memset(&qp
->qp1_hdr
, 0, sizeof(qp
->qp1_hdr
));
2147 rc
= rdma_read_gid_l2_fields(sgid_attr
, &vlan_id
, NULL
);
2151 /* Get network header type for this GID */
2152 nw_type
= rdma_gid_attr_network_type(sgid_attr
);
2154 case RDMA_NETWORK_IPV4
:
2155 nw_type
= BNXT_RE_ROCEV2_IPV4_PACKET
;
2157 case RDMA_NETWORK_IPV6
:
2158 nw_type
= BNXT_RE_ROCEV2_IPV6_PACKET
;
2161 nw_type
= BNXT_RE_ROCE_V1_PACKET
;
2164 memcpy(&dgid
.raw
, &qplib_ah
->dgid
, 16);
2165 is_udp
= sgid_attr
->gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
;
2167 if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid_attr
->gid
)) {
2169 ether_type
= ETH_P_IP
;
2172 ether_type
= ETH_P_IPV6
;
2176 ether_type
= ETH_P_IBOE
;
2181 is_vlan
= (vlan_id
&& (vlan_id
< 0x1000)) ? true : false;
2183 ib_ud_header_init(payload_size
, !is_eth
, is_eth
, is_vlan
, is_grh
,
2184 ip_version
, is_udp
, 0, &qp
->qp1_hdr
);
2187 ether_addr_copy(qp
->qp1_hdr
.eth
.dmac_h
, ah
->qplib_ah
.dmac
);
2188 ether_addr_copy(qp
->qp1_hdr
.eth
.smac_h
, qp
->qplib_qp
.smac
);
2190 /* For vlan, check the sgid for vlan existence */
2193 qp
->qp1_hdr
.eth
.type
= cpu_to_be16(ether_type
);
2195 qp
->qp1_hdr
.vlan
.type
= cpu_to_be16(ether_type
);
2196 qp
->qp1_hdr
.vlan
.tag
= cpu_to_be16(vlan_id
);
2199 if (is_grh
|| (ip_version
== 6)) {
2200 memcpy(qp
->qp1_hdr
.grh
.source_gid
.raw
, sgid_attr
->gid
.raw
,
2201 sizeof(sgid_attr
->gid
));
2202 memcpy(qp
->qp1_hdr
.grh
.destination_gid
.raw
, qplib_ah
->dgid
.data
,
2203 sizeof(sgid_attr
->gid
));
2204 qp
->qp1_hdr
.grh
.hop_limit
= qplib_ah
->hop_limit
;
2207 if (ip_version
== 4) {
2208 qp
->qp1_hdr
.ip4
.tos
= 0;
2209 qp
->qp1_hdr
.ip4
.id
= 0;
2210 qp
->qp1_hdr
.ip4
.frag_off
= htons(IP_DF
);
2211 qp
->qp1_hdr
.ip4
.ttl
= qplib_ah
->hop_limit
;
2213 memcpy(&qp
->qp1_hdr
.ip4
.saddr
, sgid_attr
->gid
.raw
+ 12, 4);
2214 memcpy(&qp
->qp1_hdr
.ip4
.daddr
, qplib_ah
->dgid
.data
+ 12, 4);
2215 qp
->qp1_hdr
.ip4
.check
= ib_ud_ip4_csum(&qp
->qp1_hdr
);
2219 qp
->qp1_hdr
.udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
2220 qp
->qp1_hdr
.udp
.sport
= htons(0x8CD1);
2221 qp
->qp1_hdr
.udp
.csum
= 0;
2225 if (wr
->opcode
== IB_WR_SEND_WITH_IMM
) {
2226 qp
->qp1_hdr
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE
;
2227 qp
->qp1_hdr
.immediate_present
= 1;
2229 qp
->qp1_hdr
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
2231 if (wr
->send_flags
& IB_SEND_SOLICITED
)
2232 qp
->qp1_hdr
.bth
.solicited_event
= 1;
2234 qp
->qp1_hdr
.bth
.pad_count
= (4 - payload_size
) & 3;
2236 /* P_key for QP1 is for all members */
2237 qp
->qp1_hdr
.bth
.pkey
= cpu_to_be16(0xFFFF);
2238 qp
->qp1_hdr
.bth
.destination_qpn
= IB_QP1
;
2239 qp
->qp1_hdr
.bth
.ack_req
= 0;
2241 qp
->send_psn
&= BTH_PSN_MASK
;
2242 qp
->qp1_hdr
.bth
.psn
= cpu_to_be32(qp
->send_psn
);
	/* Use the privileged Q_Key for QP1 */
2245 qp
->qp1_hdr
.deth
.qkey
= cpu_to_be32(IB_QP1_QKEY
);
2246 qp
->qp1_hdr
.deth
.source_qpn
= IB_QP1
;
2248 /* Pack the QP1 to the transmit buffer */
2249 buf
= bnxt_qplib_get_qp1_sq_buf(&qp
->qplib_qp
, &sge
);
2251 ib_ud_header_pack(&qp
->qp1_hdr
, buf
);
2252 for (i
= wqe
->num_sge
; i
; i
--) {
2253 wqe
->sg_list
[i
].addr
= wqe
->sg_list
[i
- 1].addr
;
2254 wqe
->sg_list
[i
].lkey
= wqe
->sg_list
[i
- 1].lkey
;
2255 wqe
->sg_list
[i
].size
= wqe
->sg_list
[i
- 1].size
;
2259 * Max Header buf size for IPV6 RoCE V2 is 86,
2260 * which is same as the QP1 SQ header buffer.
2261 * Header buf size for IPV4 RoCE V2 can be 66.
2262 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2263 * Subtract 20 bytes from QP1 SQ header buf size
2265 if (is_udp
&& ip_version
== 4)
2268 * Max Header buf size for RoCE V1 is 78.
2269 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2270 * Subtract 8 bytes from QP1 SQ header buf size
2275 /* Subtract 4 bytes for non vlan packets */
2279 wqe
->sg_list
[0].addr
= sge
.addr
;
2280 wqe
->sg_list
[0].lkey
= sge
.lkey
;
2281 wqe
->sg_list
[0].size
= sge
.size
;
2285 ibdev_err(&qp
->rdev
->ibdev
, "QP1 buffer is empty!");
/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    const struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{
	struct bnxt_re_sqp_entries *sqp_entry;
	struct bnxt_qplib_sge ref, sge;
	struct bnxt_re_dev *rdev;
	u32 rq_prod_index;

	rdev = qp->rdev;

	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
		return -ENOMEM;

	/* Create 1 SGE to receive the entire
	 * ethernet packet
	 */
	/* Save the reference from ULP */
	ref.addr = wqe->sg_list[0].addr;
	ref.lkey = wqe->sg_list[0].lkey;
	ref.size = wqe->sg_list[0].size;

	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];

	wqe->sg_list[0].addr = sge.addr;
	wqe->sg_list[0].lkey = sge.lkey;
	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	sge.size -= wqe->sg_list[0].size;

	sqp_entry->sge.addr = ref.addr;
	sqp_entry->sge.lkey = ref.lkey;
	sqp_entry->sge.size = ref.size;
	/* Store the wrid for reporting completion */
	sqp_entry->wrid = wqe->wr_id;
	/* change the wqe->wrid to table index */
	wqe->wr_id = rq_prod_index;
	return 0;
}

static int is_ud_qp(struct bnxt_re_qp *qp)
{
	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
}
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_ah *ah = NULL;

	if (is_ud_qp(qp)) {
		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
		wqe->send.q_key = ud_wr(wr)->remote_qkey;
		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
		wqe->send.avid = ah->qplib_ah.id;
	}
	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
		wqe->send.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
		wqe->send.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
		wqe->rdma.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_RDMA_READ:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
	wqe->rdma.r_key = rdma_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		wqe->atomic.swap_data = atomic_wr(wr)->swap;
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		break;
	default:
		return -EINVAL;
	}
	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
	wqe->atomic.r_key = atomic_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	return 0;
}

static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;

	/* Need unconditional fence for local invalidate
	 * opcode to work as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}

static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	/* Need unconditional fence for reg_mr
	 * opcode to function as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;
	return 0;
}
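/*
 * Illustrative sketch (not part of this driver): the shape of the ib_reg_wr
 * that typically feeds bnxt_re_build_reg_wqe() above. A kernel ULP maps a
 * scatterlist into the MR and then posts an IB_WR_REG_MR work request; the
 * identifiers below (mr, sgl, sg_nents) are assumptions for illustration.
 *
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n > 0) {
 *		reg_wr.wr.opcode = IB_WR_REG_MR;
 *		reg_wr.wr.send_flags = IB_SEND_SIGNALED;
 *		reg_wr.mr = mr;
 *		reg_wr.key = mr->rkey;
 *		reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	}
 */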
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	/* Copy the inline data to the data field */
	u32 i, sge_len;
	void *sge_addr;
	u8 *in_data;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)
				wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			ibdev_err(&rdev->ibdev,
				  "Inline data size requested > supported value");
			return -EINVAL;
		}
		sge_len = wr->sg_list[i].length;

		memcpy(in_data, sge_addr, sge_len);
		in_data += wr->sg_list[i].length;
		wqe->inline_len += wr->sg_list[i].length;
	}
	return wqe->inline_len;
}

static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   const struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{
	int payload_sz = 0;

	if (wr->send_flags & IB_SEND_INLINE)
		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
	else
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);

	return payload_sz;
}

static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_send_wr *wr)
{
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		struct bnxt_qplib_swqe wqe = {};

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Send SGEs");
			rc = -EINVAL;
			break;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			break;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Post send failed opcode = %#x rc = %d",
				  wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			/* fall thru */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			ibdev_err(&qp->rdev->ibdev,
				  "RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			ibdev_err(&qp->rdev->ibdev,
				  "WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			ibdev_err(&qp->rdev->ibdev,
				  "post_send failed op:%#x qps = %#x rc = %d\n",
				  wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}
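/*
 * Illustrative sketch (not part of this driver): a minimal signaled SEND as a
 * kernel ULP would hand it to ib_post_send(), which lands in
 * bnxt_re_post_send() above. Memory registration and completion handling are
 * omitted; dma_addr, len, lkey and ctx are assumptions for illustration.
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = {};
 *	const struct ib_send_wr *bad_wr;
 *
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.wr_id = (u64)(unsigned long)ctx;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		pr_err("post_send failed\n");
 */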
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;

		wr = wr->next;
	}
	if (!rc)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}

int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI &&
		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}
/* Completion Queues */
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct bnxt_re_cq *cq;
	struct bnxt_qplib_nq *nq;
	struct bnxt_re_dev *rdev;

	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	rdev = cq->rdev;
	nq = cq->qplib_cq.nq;

	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	ib_umem_release(cq->umem);

	atomic_dec(&rdev->cq_count);
	nq->budget--;
	kfree(cq->cql);
}

int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
	int rc, entries;
	int cqe = attr->cqe;
	struct bnxt_qplib_nq *nq = NULL;
	unsigned int nq_alloc_cnt;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
		return -EINVAL;
	}

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
	if (udata) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
	}
	/*
	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is used
	 * for getting the NQ index.
	 */
	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
	cq->qplib_cq.nq = nq;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;
	nq->budget++;

	atomic_inc(&rdev->cq_count);
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return 0;

c2fail:
	ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	return rc;
}
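/*
 * Worked example for the CQ sizing above (illustrative): a request for
 * cqe = 1000 becomes entries = roundup_pow_of_two(1000 + 1) = 1024, which is
 * then clamped to dev_attr->max_cq_wqes + 1 when the device advertises a
 * smaller CQ depth; the value reported back in ib_cq.cqe is this rounded,
 * clamped entry count rather than the caller's original number.
 */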
static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}
3052 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags
,
3053 u16 raweth_qp1_flags2
)
3055 bool is_ipv6
= false, is_ipv4
= false;
3057 /* raweth_qp1_flags Bit 9-6 indicates itype */
3058 if ((raweth_qp1_flags
& CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE
)
3059 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE
)
3062 if (raweth_qp1_flags2
&
3063 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC
&&
3065 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC
) {
3066 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
3067 (raweth_qp1_flags2
&
3068 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE
) ?
3069 (is_ipv6
= true) : (is_ipv4
= true);
3071 BNXT_RE_ROCEV2_IPV6_PACKET
:
3072 BNXT_RE_ROCEV2_IPV4_PACKET
);
3074 return BNXT_RE_ROCE_V1_PACKET
;
3078 static int bnxt_re_to_ib_nw_type(int nw_type
)
3080 u8 nw_hdr_type
= 0xFF;
3083 case BNXT_RE_ROCE_V1_PACKET
:
3084 nw_hdr_type
= RDMA_NETWORK_ROCE_V1
;
3086 case BNXT_RE_ROCEV2_IPV4_PACKET
:
3087 nw_hdr_type
= RDMA_NETWORK_IPV4
;
3089 case BNXT_RE_ROCEV2_IPV6_PACKET
:
3090 nw_hdr_type
= RDMA_NETWORK_IPV6
;
3096 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev
*rdev
,
3100 struct ethhdr
*eth_hdr
;
3104 tmp_buf
= (u8
*)rq_hdr_buf
;
3106 * If dest mac is not same as I/F mac, this could be a
3107 * loopback address or multicast address, check whether
3108 * it is a loopback packet
3110 if (!ether_addr_equal(tmp_buf
, rdev
->netdev
->dev_addr
)) {
3112 /* Check the ether type */
3113 eth_hdr
= (struct ethhdr
*)tmp_buf
;
3114 eth_type
= ntohs(eth_hdr
->h_proto
);
3122 struct udphdr
*udp_hdr
;
3124 len
= (eth_type
== ETH_P_IP
? sizeof(struct iphdr
) :
3125 sizeof(struct ipv6hdr
));
3126 tmp_buf
+= sizeof(struct ethhdr
) + len
;
3127 udp_hdr
= (struct udphdr
*)tmp_buf
;
3128 if (ntohs(udp_hdr
->dest
) ==
3141 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp
*gsi_qp
,
3142 struct bnxt_qplib_cqe
*cqe
)
3144 struct bnxt_re_dev
*rdev
= gsi_qp
->rdev
;
3145 struct bnxt_re_sqp_entries
*sqp_entry
= NULL
;
3146 struct bnxt_re_qp
*gsi_sqp
= rdev
->gsi_ctx
.gsi_sqp
;
3147 struct bnxt_re_ah
*gsi_sah
;
3148 struct ib_send_wr
*swr
;
3149 struct ib_ud_wr udwr
;
3150 struct ib_recv_wr rwr
;
3154 dma_addr_t rq_hdr_buf_map
;
3155 dma_addr_t shrq_hdr_buf_map
;
3158 struct ib_sge s_sge
[2];
3159 struct ib_sge r_sge
[2];
3162 memset(&udwr
, 0, sizeof(udwr
));
3163 memset(&rwr
, 0, sizeof(rwr
));
3164 memset(&s_sge
, 0, sizeof(s_sge
));
3165 memset(&r_sge
, 0, sizeof(r_sge
));
3168 tbl_idx
= cqe
->wr_id
;
3170 rq_hdr_buf
= gsi_qp
->qplib_qp
.rq_hdr_buf
+
3171 (tbl_idx
* gsi_qp
->qplib_qp
.rq_hdr_buf_size
);
3172 rq_hdr_buf_map
= bnxt_qplib_get_qp_buf_from_index(&gsi_qp
->qplib_qp
,
3175 /* Shadow QP header buffer */
3176 shrq_hdr_buf_map
= bnxt_qplib_get_qp_buf_from_index(&gsi_qp
->qplib_qp
,
3178 sqp_entry
= &rdev
->gsi_ctx
.sqp_tbl
[tbl_idx
];
3180 /* Store this cqe */
3181 memcpy(&sqp_entry
->cqe
, cqe
, sizeof(struct bnxt_qplib_cqe
));
3182 sqp_entry
->qp1_qp
= gsi_qp
;
3184 /* Find packet type from the cqe */
3186 pkt_type
= bnxt_re_check_packet_type(cqe
->raweth_qp1_flags
,
3187 cqe
->raweth_qp1_flags2
);
3189 ibdev_err(&rdev
->ibdev
, "Invalid packet\n");
3193 /* Adjust the offset for the user buffer and post in the rq */
3195 if (pkt_type
== BNXT_RE_ROCEV2_IPV4_PACKET
)
3199 * QP1 loopback packet has 4 bytes of internal header before
3200 * ether header. Skip these four bytes.
3202 if (bnxt_re_is_loopback_packet(rdev
, rq_hdr_buf
))
3205 /* First send SGE . Skip the ether header*/
3206 s_sge
[0].addr
= rq_hdr_buf_map
+ BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3208 s_sge
[0].lkey
= 0xFFFFFFFF;
3209 s_sge
[0].length
= offset
? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4
:
3210 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6
;
3212 /* Second Send SGE */
3213 s_sge
[1].addr
= s_sge
[0].addr
+ s_sge
[0].length
+
3214 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE
;
3215 if (pkt_type
!= BNXT_RE_ROCE_V1_PACKET
)
3217 s_sge
[1].lkey
= 0xFFFFFFFF;
3218 s_sge
[1].length
= 256;
3220 /* First recv SGE */
3222 r_sge
[0].addr
= shrq_hdr_buf_map
;
3223 r_sge
[0].lkey
= 0xFFFFFFFF;
3224 r_sge
[0].length
= 40;
3226 r_sge
[1].addr
= sqp_entry
->sge
.addr
+ offset
;
3227 r_sge
[1].lkey
= sqp_entry
->sge
.lkey
;
3228 r_sge
[1].length
= BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6
+ 256 - offset
;
3230 /* Create receive work request */
3232 rwr
.sg_list
= r_sge
;
3233 rwr
.wr_id
= tbl_idx
;
3236 rc
= bnxt_re_post_recv_shadow_qp(rdev
, gsi_sqp
, &rwr
);
3238 ibdev_err(&rdev
->ibdev
,
3239 "Failed to post Rx buffers to shadow QP");
3244 swr
->sg_list
= s_sge
;
3245 swr
->wr_id
= tbl_idx
;
3246 swr
->opcode
= IB_WR_SEND
;
3248 gsi_sah
= rdev
->gsi_ctx
.gsi_sah
;
3249 udwr
.ah
= &gsi_sah
->ib_ah
;
3250 udwr
.remote_qpn
= gsi_sqp
->qplib_qp
.id
;
3251 udwr
.remote_qkey
= gsi_sqp
->qplib_qp
.qkey
;
3253 /* post data received in the send queue */
3254 rc
= bnxt_re_post_send_shadow_qp(rdev
, gsi_sqp
, swr
);
3259 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc
*wc
,
3260 struct bnxt_qplib_cqe
*cqe
)
3262 wc
->opcode
= IB_WC_RECV
;
3263 wc
->status
= __rawqp1_to_ib_wc_status(cqe
->status
);
3264 wc
->wc_flags
|= IB_WC_GRH
;
3267 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev
*rdev
,
	/*
	 * Check if the vlan is configured in the host. If not configured, it
	 * can be a transparent VLAN. So don't report the vlan id.
	 */
3274 if (!__vlan_find_dev_deep_rcu(rdev
->netdev
,
3275 htons(ETH_P_8021Q
), vlan_id
))
3280 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe
*orig_cqe
,
3287 metadata
= orig_cqe
->raweth_qp1_metadata
;
3288 if (orig_cqe
->raweth_qp1_flags2
&
3289 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN
) {
3291 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK
) >>
3292 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT
);
3293 if (tpid
== ETH_P_8021Q
) {
3295 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK
;
3297 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK
) >>
3298 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT
;
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
3321 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp
*gsi_sqp
,
3323 struct bnxt_qplib_cqe
*cqe
)
3325 struct bnxt_re_dev
*rdev
= gsi_sqp
->rdev
;
3326 struct bnxt_re_qp
*gsi_qp
= NULL
;
3327 struct bnxt_qplib_cqe
*orig_cqe
= NULL
;
3328 struct bnxt_re_sqp_entries
*sqp_entry
= NULL
;
3334 tbl_idx
= cqe
->wr_id
;
3336 sqp_entry
= &rdev
->gsi_ctx
.sqp_tbl
[tbl_idx
];
3337 gsi_qp
= sqp_entry
->qp1_qp
;
3338 orig_cqe
= &sqp_entry
->cqe
;
3340 wc
->wr_id
= sqp_entry
->wrid
;
3341 wc
->byte_len
= orig_cqe
->length
;
3342 wc
->qp
= &gsi_qp
->ib_qp
;
3344 wc
->ex
.imm_data
= orig_cqe
->immdata
;
3345 wc
->src_qp
= orig_cqe
->src_qp
;
3346 memcpy(wc
->smac
, orig_cqe
->smac
, ETH_ALEN
);
3347 if (bnxt_re_is_vlan_pkt(orig_cqe
, &vlan_id
, &sl
)) {
3348 if (bnxt_re_check_if_vlan_valid(rdev
, vlan_id
)) {
3349 wc
->vlan_id
= vlan_id
;
3351 wc
->wc_flags
|= IB_WC_WITH_VLAN
;
3355 wc
->vendor_err
= orig_cqe
->status
;
3357 wc
->opcode
= IB_WC_RECV
;
3358 wc
->status
= __rawqp1_to_ib_wc_status(orig_cqe
->status
);
3359 wc
->wc_flags
|= IB_WC_GRH
;
3361 nw_type
= bnxt_re_check_packet_type(orig_cqe
->raweth_qp1_flags
,
3362 orig_cqe
->raweth_qp1_flags2
);
3364 wc
->network_hdr_type
= bnxt_re_to_ib_nw_type(nw_type
);
3365 wc
->wc_flags
|= IB_WC_WITH_NETWORK_HDR_TYPE
;
3369 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp
*qp
,
3371 struct bnxt_qplib_cqe
*cqe
)
3375 wc
->opcode
= IB_WC_RECV
;
3376 wc
->status
= __rc_to_ib_wc_status(cqe
->status
);
3378 if (cqe
->flags
& CQ_RES_UD_FLAGS_IMM
)
3379 wc
->wc_flags
|= IB_WC_WITH_IMM
;
3380 /* report only on GSI QP for Thor */
3381 if (qp
->qplib_qp
.type
== CMDQ_CREATE_QP_TYPE_GSI
) {
3382 wc
->wc_flags
|= IB_WC_GRH
;
3383 memcpy(wc
->smac
, cqe
->smac
, ETH_ALEN
);
3384 wc
->wc_flags
|= IB_WC_WITH_SMAC
;
3385 if (cqe
->flags
& CQ_RES_UD_FLAGS_META_FORMAT_VLAN
) {
3386 wc
->vlan_id
= (cqe
->cfa_meta
& 0xFFF);
3387 if (wc
->vlan_id
< 0x1000)
3388 wc
->wc_flags
|= IB_WC_WITH_VLAN
;
3390 nw_type
= (cqe
->flags
& CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK
) >>
3391 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT
;
3392 wc
->network_hdr_type
= bnxt_re_to_ib_nw_type(nw_type
);
3393 wc
->wc_flags
|= IB_WC_WITH_NETWORK_HDR_TYPE
;
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		ibdev_dbg(&qp->rdev->ibdev,
			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			  lib_qp->id, lib_qp->sq.hwq.prod,
			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			  lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
3420 int bnxt_re_poll_cq(struct ib_cq
*ib_cq
, int num_entries
, struct ib_wc
*wc
)
3422 struct bnxt_re_cq
*cq
= container_of(ib_cq
, struct bnxt_re_cq
, ib_cq
);
3423 struct bnxt_re_qp
*qp
, *sh_qp
;
3424 struct bnxt_qplib_cqe
*cqe
;
3425 int i
, ncqe
, budget
;
3426 struct bnxt_qplib_q
*sq
;
3427 struct bnxt_qplib_qp
*lib_qp
;
3429 struct bnxt_re_sqp_entries
*sqp_entry
= NULL
;
3430 unsigned long flags
;
3432 spin_lock_irqsave(&cq
->cq_lock
, flags
);
3433 budget
= min_t(u32
, num_entries
, cq
->max_cql
);
3434 num_entries
= budget
;
3436 ibdev_err(&cq
->rdev
->ibdev
, "POLL CQ : no CQL to use");
3442 ncqe
= bnxt_qplib_poll_cq(&cq
->qplib_cq
, cqe
, budget
, &lib_qp
);
3445 if (sq
->send_phantom
) {
3446 qp
= container_of(lib_qp
,
3447 struct bnxt_re_qp
, qplib_qp
);
3448 if (send_phantom_wqe(qp
) == -ENOMEM
)
3449 ibdev_err(&cq
->rdev
->ibdev
,
3450 "Phantom failed! Scheduled to send again\n");
3452 sq
->send_phantom
= false;
3456 ncqe
+= bnxt_qplib_process_flush_list(&cq
->qplib_cq
,
3463 for (i
= 0; i
< ncqe
; i
++, cqe
++) {
3464 /* Transcribe each qplib_wqe back to ib_wc */
3465 memset(wc
, 0, sizeof(*wc
));
3467 wc
->wr_id
= cqe
->wr_id
;
3468 wc
->byte_len
= cqe
->length
;
3470 ((struct bnxt_qplib_qp
*)
3471 (unsigned long)(cqe
->qp_handle
),
3472 struct bnxt_re_qp
, qplib_qp
);
3474 ibdev_err(&cq
->rdev
->ibdev
, "POLL CQ : bad QP handle");
3477 wc
->qp
= &qp
->ib_qp
;
3478 wc
->ex
.imm_data
= cqe
->immdata
;
3479 wc
->src_qp
= cqe
->src_qp
;
3480 memcpy(wc
->smac
, cqe
->smac
, ETH_ALEN
);
3482 wc
->vendor_err
= cqe
->status
;
3484 switch (cqe
->opcode
) {
3485 case CQ_BASE_CQE_TYPE_REQ
:
3486 sh_qp
= qp
->rdev
->gsi_ctx
.gsi_sqp
;
3488 qp
->qplib_qp
.id
== sh_qp
->qplib_qp
.id
) {
3489 /* Handle this completion with
3490 * the stored completion
3492 memset(wc
, 0, sizeof(*wc
));
3495 bnxt_re_process_req_wc(wc
, cqe
);
3497 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1
:
3501 rc
= bnxt_re_process_raw_qp_pkt_rx
3504 memset(wc
, 0, sizeof(*wc
));
3509 /* Errors need not be looped back.
3510 * But change the wr_id to the one
3511 * stored in the table
3513 tbl_idx
= cqe
->wr_id
;
3514 sqp_entry
= &cq
->rdev
->gsi_ctx
.sqp_tbl
[tbl_idx
];
3515 wc
->wr_id
= sqp_entry
->wrid
;
3516 bnxt_re_process_res_rawqp1_wc(wc
, cqe
);
3518 case CQ_BASE_CQE_TYPE_RES_RC
:
3519 bnxt_re_process_res_rc_wc(wc
, cqe
);
3521 case CQ_BASE_CQE_TYPE_RES_UD
:
3522 sh_qp
= qp
->rdev
->gsi_ctx
.gsi_sqp
;
3524 qp
->qplib_qp
.id
== sh_qp
->qplib_qp
.id
) {
3525 /* Handle this completion with
3526 * the stored completion
3531 bnxt_re_process_res_shadow_qp_wc
3536 bnxt_re_process_res_ud_wc(qp
, wc
, cqe
);
3539 ibdev_err(&cq
->rdev
->ibdev
,
3540 "POLL CQ : type 0x%x not handled",
3549 spin_unlock_irqrestore(&cq
->cq_lock
, flags
);
3550 return num_entries
- budget
;
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBC_DBC_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBC_DBC_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return rc;
}
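/*
 * Illustrative sketch (not part of this driver): the re-arm/poll pattern that
 * the IB_CQ_REPORT_MISSED_EVENTS return value above is designed for. A ULP
 * drains the CQ, re-arms it, and polls again whenever the verb reports that
 * completions slipped in meanwhile. handle_wc() is an assumed helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */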
3581 /* Memory Regions */
3582 struct ib_mr
*bnxt_re_get_dma_mr(struct ib_pd
*ib_pd
, int mr_access_flags
)
3584 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
3585 struct bnxt_re_dev
*rdev
= pd
->rdev
;
3586 struct bnxt_re_mr
*mr
;
3590 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
3592 return ERR_PTR(-ENOMEM
);
3595 mr
->qplib_mr
.pd
= &pd
->qplib_pd
;
3596 mr
->qplib_mr
.flags
= __from_ib_access_flags(mr_access_flags
);
3597 mr
->qplib_mr
.type
= CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR
;
3599 /* Allocate and register 0 as the address */
3600 rc
= bnxt_qplib_alloc_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3604 mr
->qplib_mr
.hwq
.level
= PBL_LVL_MAX
;
3605 mr
->qplib_mr
.total_size
= -1; /* Infinite length */
3606 rc
= bnxt_qplib_reg_mr(&rdev
->qplib_res
, &mr
->qplib_mr
, &pbl
, 0, false,
3611 mr
->ib_mr
.lkey
= mr
->qplib_mr
.lkey
;
3612 if (mr_access_flags
& (IB_ACCESS_REMOTE_WRITE
| IB_ACCESS_REMOTE_READ
|
3613 IB_ACCESS_REMOTE_ATOMIC
))
3614 mr
->ib_mr
.rkey
= mr
->ib_mr
.lkey
;
3615 atomic_inc(&rdev
->mr_count
);
3620 bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3626 int bnxt_re_dereg_mr(struct ib_mr
*ib_mr
, struct ib_udata
*udata
)
3628 struct bnxt_re_mr
*mr
= container_of(ib_mr
, struct bnxt_re_mr
, ib_mr
);
3629 struct bnxt_re_dev
*rdev
= mr
->rdev
;
3632 rc
= bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3634 ibdev_err(&rdev
->ibdev
, "Dereg MR failed: %#x\n", rc
);
3639 rc
= bnxt_qplib_free_fast_reg_page_list(&rdev
->qplib_res
,
3645 ib_umem_release(mr
->ib_umem
);
3648 atomic_dec(&rdev
->mr_count
);
3652 static int bnxt_re_set_page(struct ib_mr
*ib_mr
, u64 addr
)
3654 struct bnxt_re_mr
*mr
= container_of(ib_mr
, struct bnxt_re_mr
, ib_mr
);
3656 if (unlikely(mr
->npages
== mr
->qplib_frpl
.max_pg_ptrs
))
3659 mr
->pages
[mr
->npages
++] = addr
;
3663 int bnxt_re_map_mr_sg(struct ib_mr
*ib_mr
, struct scatterlist
*sg
, int sg_nents
,
3664 unsigned int *sg_offset
)
3666 struct bnxt_re_mr
*mr
= container_of(ib_mr
, struct bnxt_re_mr
, ib_mr
);
3669 return ib_sg_to_pages(ib_mr
, sg
, sg_nents
, sg_offset
, bnxt_re_set_page
);
3672 struct ib_mr
*bnxt_re_alloc_mr(struct ib_pd
*ib_pd
, enum ib_mr_type type
,
3675 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
3676 struct bnxt_re_dev
*rdev
= pd
->rdev
;
3677 struct bnxt_re_mr
*mr
= NULL
;
3680 if (type
!= IB_MR_TYPE_MEM_REG
) {
3681 ibdev_dbg(&rdev
->ibdev
, "MR type 0x%x not supported", type
);
3682 return ERR_PTR(-EINVAL
);
3684 if (max_num_sg
> MAX_PBL_LVL_1_PGS
)
3685 return ERR_PTR(-EINVAL
);
3687 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
3689 return ERR_PTR(-ENOMEM
);
3692 mr
->qplib_mr
.pd
= &pd
->qplib_pd
;
3693 mr
->qplib_mr
.flags
= BNXT_QPLIB_FR_PMR
;
3694 mr
->qplib_mr
.type
= CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR
;
3696 rc
= bnxt_qplib_alloc_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3700 mr
->ib_mr
.lkey
= mr
->qplib_mr
.lkey
;
3701 mr
->ib_mr
.rkey
= mr
->ib_mr
.lkey
;
3703 mr
->pages
= kcalloc(max_num_sg
, sizeof(u64
), GFP_KERNEL
);
3708 rc
= bnxt_qplib_alloc_fast_reg_page_list(&rdev
->qplib_res
,
3709 &mr
->qplib_frpl
, max_num_sg
);
3711 ibdev_err(&rdev
->ibdev
,
3712 "Failed to allocate HW FR page list");
3716 atomic_inc(&rdev
->mr_count
);
3722 bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3728 struct ib_mw
*bnxt_re_alloc_mw(struct ib_pd
*ib_pd
, enum ib_mw_type type
,
3729 struct ib_udata
*udata
)
3731 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
3732 struct bnxt_re_dev
*rdev
= pd
->rdev
;
3733 struct bnxt_re_mw
*mw
;
3736 mw
= kzalloc(sizeof(*mw
), GFP_KERNEL
);
3738 return ERR_PTR(-ENOMEM
);
3740 mw
->qplib_mw
.pd
= &pd
->qplib_pd
;
3742 mw
->qplib_mw
.type
= (type
== IB_MW_TYPE_1
?
3743 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1
:
3744 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
);
3745 rc
= bnxt_qplib_alloc_mrw(&rdev
->qplib_res
, &mw
->qplib_mw
);
3747 ibdev_err(&rdev
->ibdev
, "Allocate MW failed!");
3750 mw
->ib_mw
.rkey
= mw
->qplib_mw
.rkey
;
3752 atomic_inc(&rdev
->mw_count
);
3760 int bnxt_re_dealloc_mw(struct ib_mw
*ib_mw
)
3762 struct bnxt_re_mw
*mw
= container_of(ib_mw
, struct bnxt_re_mw
, ib_mw
);
3763 struct bnxt_re_dev
*rdev
= mw
->rdev
;
3766 rc
= bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mw
->qplib_mw
);
3768 ibdev_err(&rdev
->ibdev
, "Free MW failed: %#x\n", rc
);
3773 atomic_dec(&rdev
->mw_count
);
static int bnxt_re_page_size_ok(int page_shift)
{
	switch (page_shift) {
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
		return 1;
	default:
		return 0;
	}
}

static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
			     int page_shift)
{
	u64 *pbl_tbl = pbl_tbl_orig;
	u64 page_size = BIT_ULL(page_shift);
	struct ib_block_iter biter;

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);

	return pbl_tbl - pbl_tbl_orig;
}
*bnxt_re_reg_user_mr(struct ib_pd
*ib_pd
, u64 start
, u64 length
,
3809 u64 virt_addr
, int mr_access_flags
,
3810 struct ib_udata
*udata
)
3812 struct bnxt_re_pd
*pd
= container_of(ib_pd
, struct bnxt_re_pd
, ib_pd
);
3813 struct bnxt_re_dev
*rdev
= pd
->rdev
;
3814 struct bnxt_re_mr
*mr
;
3815 struct ib_umem
*umem
;
3816 u64
*pbl_tbl
= NULL
;
3817 int umem_pgs
, page_shift
, rc
;
3819 if (length
> BNXT_RE_MAX_MR_SIZE
) {
3820 ibdev_err(&rdev
->ibdev
, "MR Size: %lld > Max supported:%lld\n",
3821 length
, BNXT_RE_MAX_MR_SIZE
);
3822 return ERR_PTR(-ENOMEM
);
3825 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
3827 return ERR_PTR(-ENOMEM
);
3830 mr
->qplib_mr
.pd
= &pd
->qplib_pd
;
3831 mr
->qplib_mr
.flags
= __from_ib_access_flags(mr_access_flags
);
3832 mr
->qplib_mr
.type
= CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR
;
3834 rc
= bnxt_qplib_alloc_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3836 ibdev_err(&rdev
->ibdev
, "Failed to allocate MR");
3839 /* The fixed portion of the rkey is the same as the lkey */
3840 mr
->ib_mr
.rkey
= mr
->qplib_mr
.rkey
;
3842 umem
= ib_umem_get(&rdev
->ibdev
, start
, length
, mr_access_flags
);
3844 ibdev_err(&rdev
->ibdev
, "Failed to get umem");
3850 mr
->qplib_mr
.va
= virt_addr
;
3851 umem_pgs
= ib_umem_page_count(umem
);
3853 ibdev_err(&rdev
->ibdev
, "umem is invalid!");
3857 mr
->qplib_mr
.total_size
= length
;
3859 pbl_tbl
= kcalloc(umem_pgs
, sizeof(u64
*), GFP_KERNEL
);
3865 page_shift
= __ffs(ib_umem_find_best_pgsz(umem
,
3866 BNXT_RE_PAGE_SIZE_4K
| BNXT_RE_PAGE_SIZE_2M
,
3869 if (!bnxt_re_page_size_ok(page_shift
)) {
3870 ibdev_err(&rdev
->ibdev
, "umem page size unsupported!");
3875 if (page_shift
== BNXT_RE_PAGE_SHIFT_4K
&&
3876 length
> BNXT_RE_MAX_MR_SIZE_LOW
) {
3877 ibdev_err(&rdev
->ibdev
, "Requested MR Sz:%llu Max sup:%llu",
3878 length
, (u64
)BNXT_RE_MAX_MR_SIZE_LOW
);
3883 /* Map umem buf ptrs to the PBL */
3884 umem_pgs
= fill_umem_pbl_tbl(umem
, pbl_tbl
, page_shift
);
3885 rc
= bnxt_qplib_reg_mr(&rdev
->qplib_res
, &mr
->qplib_mr
, pbl_tbl
,
3886 umem_pgs
, false, 1 << page_shift
);
3888 ibdev_err(&rdev
->ibdev
, "Failed to register user MR");
3894 mr
->ib_mr
.lkey
= mr
->qplib_mr
.lkey
;
3895 mr
->ib_mr
.rkey
= mr
->qplib_mr
.lkey
;
3896 atomic_inc(&rdev
->mr_count
);
3902 ib_umem_release(umem
);
3904 bnxt_qplib_free_mrw(&rdev
->qplib_res
, &mr
->qplib_mr
);
3910 int bnxt_re_alloc_ucontext(struct ib_ucontext
*ctx
, struct ib_udata
*udata
)
3912 struct ib_device
*ibdev
= ctx
->device
;
3913 struct bnxt_re_ucontext
*uctx
=
3914 container_of(ctx
, struct bnxt_re_ucontext
, ib_uctx
);
3915 struct bnxt_re_dev
*rdev
= to_bnxt_re_dev(ibdev
, ibdev
);
3916 struct bnxt_qplib_dev_attr
*dev_attr
= &rdev
->dev_attr
;
3917 struct bnxt_re_uctx_resp resp
;
3918 u32 chip_met_rev_num
= 0;
3921 ibdev_dbg(ibdev
, "ABI version requested %u", ibdev
->ops
.uverbs_abi_ver
);
3923 if (ibdev
->ops
.uverbs_abi_ver
!= BNXT_RE_ABI_VERSION
) {
3924 ibdev_dbg(ibdev
, " is different from the device %d ",
3925 BNXT_RE_ABI_VERSION
);
3931 uctx
->shpg
= (void *)__get_free_page(GFP_KERNEL
);
3936 spin_lock_init(&uctx
->sh_lock
);
3938 resp
.comp_mask
= BNXT_RE_UCNTX_CMASK_HAVE_CCTX
;
3939 chip_met_rev_num
= rdev
->chip_ctx
->chip_num
;
3940 chip_met_rev_num
|= ((u32
)rdev
->chip_ctx
->chip_rev
& 0xFF) <<
3941 BNXT_RE_CHIP_ID0_CHIP_REV_SFT
;
3942 chip_met_rev_num
|= ((u32
)rdev
->chip_ctx
->chip_metal
& 0xFF) <<
3943 BNXT_RE_CHIP_ID0_CHIP_MET_SFT
;
3944 resp
.chip_id0
= chip_met_rev_num
;
3945 /* Future extension of chip info */
3947 /*Temp, Use xa_alloc instead */
3948 resp
.dev_id
= rdev
->en_dev
->pdev
->devfn
;
3949 resp
.max_qp
= rdev
->qplib_ctx
.qpc_count
;
3950 resp
.pg_size
= PAGE_SIZE
;
3951 resp
.cqe_sz
= sizeof(struct cq_base
);
3952 resp
.max_cqd
= dev_attr
->max_cq_wqes
;
3955 rc
= ib_copy_to_udata(udata
, &resp
, min(udata
->outlen
, sizeof(resp
)));
3957 ibdev_err(ibdev
, "Failed to copy user context");
3964 free_page((unsigned long)uctx
->shpg
);
3970 void bnxt_re_dealloc_ucontext(struct ib_ucontext
*ib_uctx
)
3972 struct bnxt_re_ucontext
*uctx
= container_of(ib_uctx
,
3973 struct bnxt_re_ucontext
,
3976 struct bnxt_re_dev
*rdev
= uctx
->rdev
;
3979 free_page((unsigned long)uctx
->shpg
);
3981 if (uctx
->dpi
.dbr
) {
3982 /* Free DPI only if this is the first PD allocated by the
3983 * application and mark the context dpi as NULL
3985 bnxt_qplib_dealloc_dpi(&rdev
->qplib_res
,
3986 &rdev
->qplib_res
.dpi_tbl
, &uctx
->dpi
);
3987 uctx
->dpi
.dbr
= NULL
;
3991 /* Helper function to mmap the virtual memory from user app */
3992 int bnxt_re_mmap(struct ib_ucontext
*ib_uctx
, struct vm_area_struct
*vma
)
3994 struct bnxt_re_ucontext
*uctx
= container_of(ib_uctx
,
3995 struct bnxt_re_ucontext
,
3997 struct bnxt_re_dev
*rdev
= uctx
->rdev
;
4000 if (vma
->vm_end
- vma
->vm_start
!= PAGE_SIZE
)
4003 if (vma
->vm_pgoff
) {
4004 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
4005 if (io_remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
4006 PAGE_SIZE
, vma
->vm_page_prot
)) {
4007 ibdev_err(&rdev
->ibdev
, "Failed to map DPI");
4011 pfn
= virt_to_phys(uctx
->shpg
) >> PAGE_SHIFT
;
4012 if (remap_pfn_range(vma
, vma
->vm_start
,
4013 pfn
, PAGE_SIZE
, vma
->vm_page_prot
)) {
4014 ibdev_err(&rdev
->ibdev
, "Failed to map shared page");