drivers/infiniband/hw/qedr/verbs.c (Ubuntu-5.15.0-39.42)
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45#include <rdma/uverbs_ioctl.h>
46
47#include <linux/qed/common_hsi.h>
48#include "qedr_hsi_rdma.h"
49#include <linux/qed/qed_if.h>
50#include "qedr.h"
51#include "verbs.h"
52#include <rdma/qedr-abi.h>
53#include "qedr_roce_cm.h"
54#include "qedr_iw_cm.h"
55
56#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
57#define RDMA_MAX_SGE_PER_SRQ (4)
58#define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
59
60#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
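/* DB_ADDR_SHIFT() turns a DQ PWM offset constant into the byte offset used
 * when forming a doorbell address: user space adds its DPI base to it, the
 * kernel adds the mapped doorbell BAR address (see the CQ/QP setup below).
 */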
61
62enum {
63 QEDR_USER_MMAP_IO_WC = 0,
64 QEDR_USER_MMAP_PHYS_PAGE,
65};
66
67static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 size_t len)
69{
70 size_t min_len = min_t(size_t, len, udata->outlen);
71
72 return ib_copy_to_udata(udata, src, min_len);
73}
74
75int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
76{
77 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 return -EINVAL;
79
80 *pkey = QEDR_ROCE_PKEY_DEFAULT;
81 return 0;
82}
83
84int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
85 int index, union ib_gid *sgid)
86{
87 struct qedr_dev *dev = get_qedr_dev(ibdev);
88
89 memset(sgid->raw, 0, sizeof(sgid->raw));
90 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91
92 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 sgid->global.interface_id, sgid->global.subnet_prefix);
94
95 return 0;
96}
97
98int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99{
100 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 struct qedr_device_attr *qattr = &dev->attr;
102 struct qedr_srq *srq = get_qedr_srq(ibsrq);
103
104 srq_attr->srq_limit = srq->srq_limit;
105 srq_attr->max_wr = qattr->max_srq_wr;
106 srq_attr->max_sge = qattr->max_sge;
107
108 return 0;
109}
110
111int qedr_query_device(struct ib_device *ibdev,
112 struct ib_device_attr *attr, struct ib_udata *udata)
113{
114 struct qedr_dev *dev = get_qedr_dev(ibdev);
115 struct qedr_device_attr *qattr = &dev->attr;
116
117 if (!dev->rdma_ctx) {
118 DP_ERR(dev,
119 "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 dev->rdma_ctx);
121 return -EINVAL;
122 }
123
124 memset(attr, 0, sizeof(*attr));
125
126 attr->fw_ver = qattr->fw_ver;
127 attr->sys_image_guid = qattr->sys_image_guid;
128 attr->max_mr_size = qattr->max_mr_size;
129 attr->page_size_cap = qattr->page_size_caps;
130 attr->vendor_id = qattr->vendor_id;
131 attr->vendor_part_id = qattr->vendor_part_id;
132 attr->hw_ver = qattr->hw_ver;
133 attr->max_qp = qattr->max_qp;
134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 IB_DEVICE_RC_RNR_NAK_GEN |
137 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
138
139 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
140 attr->device_cap_flags |= IB_DEVICE_XRC;
141 attr->max_send_sge = qattr->max_sge;
142 attr->max_recv_sge = qattr->max_sge;
143 attr->max_sge_rd = qattr->max_sge;
144 attr->max_cq = qattr->max_cq;
145 attr->max_cqe = qattr->max_cqe;
146 attr->max_mr = qattr->max_mr;
147 attr->max_mw = qattr->max_mw;
148 attr->max_pd = qattr->max_pd;
149 attr->atomic_cap = dev->atomic_cap;
150 attr->max_qp_init_rd_atom =
151 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
152 attr->max_qp_rd_atom =
153 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
154 attr->max_qp_init_rd_atom);
155
156 attr->max_srq = qattr->max_srq;
157 attr->max_srq_sge = qattr->max_srq_sge;
158 attr->max_srq_wr = qattr->max_srq_wr;
159
160 attr->local_ca_ack_delay = qattr->dev_ack_delay;
161 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
162 attr->max_pkeys = qattr->max_pkey;
163 attr->max_ah = qattr->max_ah;
164
165 return 0;
166}
167
168static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
169 u8 *ib_width)
170{
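 /* Map the Ethernet link speed (in Mbps) onto an IB speed/width pair whose
 * product is comparable, e.g. 40000 -> QDR x4; unknown speeds fall back to
 * SDR x1.
 */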
171 switch (speed) {
172 case 1000:
173 *ib_speed = IB_SPEED_SDR;
174 *ib_width = IB_WIDTH_1X;
175 break;
176 case 10000:
177 *ib_speed = IB_SPEED_QDR;
178 *ib_width = IB_WIDTH_1X;
179 break;
180
181 case 20000:
182 *ib_speed = IB_SPEED_DDR;
183 *ib_width = IB_WIDTH_4X;
184 break;
185
186 case 25000:
187 *ib_speed = IB_SPEED_EDR;
188 *ib_width = IB_WIDTH_1X;
189 break;
190
191 case 40000:
192 *ib_speed = IB_SPEED_QDR;
193 *ib_width = IB_WIDTH_4X;
194 break;
195
196 case 50000:
197 *ib_speed = IB_SPEED_HDR;
198 *ib_width = IB_WIDTH_1X;
199 break;
200
201 case 100000:
202 *ib_speed = IB_SPEED_EDR;
203 *ib_width = IB_WIDTH_4X;
204 break;
205
206 default:
207 /* Unsupported */
208 *ib_speed = IB_SPEED_SDR;
209 *ib_width = IB_WIDTH_1X;
210 }
211}
212
213int qedr_query_port(struct ib_device *ibdev, u32 port,
214 struct ib_port_attr *attr)
215{
216 struct qedr_dev *dev;
217 struct qed_rdma_port *rdma_port;
218
219 dev = get_qedr_dev(ibdev);
220
221 if (!dev->rdma_ctx) {
222 DP_ERR(dev, "rdma_ctx is NULL\n");
223 return -EINVAL;
224 }
225
226 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
227
228 /* *attr being zeroed by the caller, avoid zeroing it here */
229 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
230 attr->state = IB_PORT_ACTIVE;
231 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
232 } else {
233 attr->state = IB_PORT_DOWN;
234 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
235 }
236 attr->max_mtu = IB_MTU_4096;
237 attr->lid = 0;
238 attr->lmc = 0;
239 attr->sm_lid = 0;
240 attr->sm_sl = 0;
241 attr->ip_gids = true;
242 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
243 attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
244 attr->gid_tbl_len = 1;
245 } else {
246 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
247 attr->gid_tbl_len = QEDR_MAX_SGID;
248 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
249 }
250 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
251 attr->qkey_viol_cntr = 0;
252 get_link_speed_and_width(rdma_port->link_speed,
253 &attr->active_speed, &attr->active_width);
254 attr->max_msg_sz = rdma_port->max_msg_size;
255 attr->max_vl_num = 4;
256
257 return 0;
258}
259
260int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
261{
262 struct ib_device *ibdev = uctx->device;
263 int rc;
264 struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
265 struct qedr_alloc_ucontext_resp uresp = {};
266 struct qedr_alloc_ucontext_req ureq = {};
267 struct qedr_dev *dev = get_qedr_dev(ibdev);
268 struct qed_rdma_add_user_out_params oparams;
269 struct qedr_user_mmap_entry *entry;
270
271 if (!udata)
272 return -EFAULT;
273
274 if (udata->inlen) {
275 rc = ib_copy_from_udata(&ureq, udata,
276 min(sizeof(ureq), udata->inlen));
277 if (rc) {
278 DP_ERR(dev, "Problem copying data from user space\n");
279 return -EFAULT;
280 }
281 ctx->edpm_mode = !!(ureq.context_flags &
282 QEDR_ALLOC_UCTX_EDPM_MODE);
283 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
284 }
285
286 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
287 if (rc) {
288 DP_ERR(dev,
289 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
290 rc);
291 return rc;
292 }
293
294 ctx->dpi = oparams.dpi;
295 ctx->dpi_addr = oparams.dpi_addr;
296 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
297 ctx->dpi_size = oparams.dpi_size;
298 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
299 if (!entry) {
300 rc = -ENOMEM;
301 goto err;
302 }
303
304 entry->io_address = ctx->dpi_phys_addr;
305 entry->length = ctx->dpi_size;
306 entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
307 entry->dpi = ctx->dpi;
308 entry->dev = dev;
309 rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
310 ctx->dpi_size);
311 if (rc) {
312 kfree(entry);
313 goto err;
314 }
315 ctx->db_mmap_entry = &entry->rdma_entry;
316
317 if (!dev->user_dpm_enabled)
318 uresp.dpm_flags = 0;
319 else if (rdma_protocol_iwarp(&dev->ibdev, 1))
320 uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
321 else
322 uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
323 QEDR_DPM_TYPE_ROCE_LEGACY |
324 QEDR_DPM_TYPE_ROCE_EDPM_MODE;
325
326 if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
327 uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
328 uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
329 uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
330 uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
331 }
332
333 uresp.wids_enabled = 1;
334 uresp.wid_count = oparams.wid_count;
335 uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
336 uresp.db_size = ctx->dpi_size;
337 uresp.max_send_wr = dev->attr.max_sqe;
338 uresp.max_recv_wr = dev->attr.max_rqe;
339 uresp.max_srq_wr = dev->attr.max_srq_wr;
340 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
341 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
342 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
343 uresp.max_cqes = QEDR_MAX_CQES;
344
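 /* uresp.db_pa is an rdma_user_mmap cookie, not a physical address: a user
 * library is expected to hand it back as the mmap() offset on the verbs
 * device fd, roughly mmap(NULL, uresp.db_size, PROT_WRITE, MAP_SHARED,
 * cmd_fd, uresp.db_pa), to obtain a write-combined mapping of its doorbell
 * window. This is only a sketch; the real flow lives in the rdma-core qedr
 * provider.
 */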
345 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
346 if (rc)
347 goto err;
348
349 ctx->dev = dev;
350
351 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
352 &ctx->ibucontext);
353 return 0;
354
355err:
356 if (!ctx->db_mmap_entry)
357 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
358 else
359 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
360
361 return rc;
362}
363
364void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
365{
366 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
367
368 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
369 uctx);
370
371 rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
372}
373
374void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
375{
376 struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
377 struct qedr_dev *dev = entry->dev;
378
379 if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
380 free_page((unsigned long)entry->address);
381 else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
382 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
383
384 kfree(entry);
385}
386
387int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
388{
389 struct ib_device *dev = ucontext->device;
390 size_t length = vma->vm_end - vma->vm_start;
391 struct rdma_user_mmap_entry *rdma_entry;
392 struct qedr_user_mmap_entry *entry;
393 int rc = 0;
394 u64 pfn;
395
396 ibdev_dbg(dev,
397 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
398 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
399
400 rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
401 if (!rdma_entry) {
402 ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
403 vma->vm_pgoff);
404 return -EINVAL;
405 }
406 entry = get_qedr_mmap_entry(rdma_entry);
407 ibdev_dbg(dev,
408 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
409 entry->io_address, length, entry->mmap_flag);
410
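 /* Two entry types are handed out to user space: QEDR_USER_MMAP_IO_WC maps
 * the doorbell BAR window write-combined, while QEDR_USER_MMAP_PHYS_PAGE
 * inserts the kernel page used for doorbell recovery.
 */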
411 switch (entry->mmap_flag) {
412 case QEDR_USER_MMAP_IO_WC:
413 pfn = entry->io_address >> PAGE_SHIFT;
414 rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
415 pgprot_writecombine(vma->vm_page_prot),
416 rdma_entry);
417 break;
418 case QEDR_USER_MMAP_PHYS_PAGE:
419 rc = vm_insert_page(vma, vma->vm_start,
420 virt_to_page(entry->address));
421 break;
422 default:
423 rc = -EINVAL;
424 }
425
426 if (rc)
427 ibdev_dbg(dev,
428 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
429 entry->io_address, length, entry->mmap_flag, rc);
430
431 rdma_user_mmap_entry_put(rdma_entry);
432 return rc;
433}
434
435int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
436{
437 struct ib_device *ibdev = ibpd->device;
438 struct qedr_dev *dev = get_qedr_dev(ibdev);
439 struct qedr_pd *pd = get_qedr_pd(ibpd);
440 u16 pd_id;
441 int rc;
442
443 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
444 udata ? "User Lib" : "Kernel");
445
446 if (!dev->rdma_ctx) {
447 DP_ERR(dev, "invalid RDMA context\n");
448 return -EINVAL;
449 }
450
451 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
452 if (rc)
453 return rc;
454
455 pd->pd_id = pd_id;
456
457 if (udata) {
458 struct qedr_alloc_pd_uresp uresp = {
459 .pd_id = pd_id,
460 };
461 struct qedr_ucontext *context = rdma_udata_to_drv_context(
462 udata, struct qedr_ucontext, ibucontext);
463
464 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
465 if (rc) {
466 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
467 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
468 return rc;
469 }
470
471 pd->uctx = context;
472 pd->uctx->pd = pd;
473 }
474
475 return 0;
476}
477
478int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
479{
480 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
481 struct qedr_pd *pd = get_qedr_pd(ibpd);
482
483 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
484 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
485 return 0;
486}
487
488
489int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
490{
491 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
492 struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
493
494 return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
495}
496
497int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
498{
499 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
500 u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
501
502 dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
503 return 0;
504}
505static void qedr_free_pbl(struct qedr_dev *dev,
506 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
507{
508 struct pci_dev *pdev = dev->pdev;
509 int i;
510
511 for (i = 0; i < pbl_info->num_pbls; i++) {
512 if (!pbl[i].va)
513 continue;
514 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
515 pbl[i].va, pbl[i].pa);
516 }
517
518 kfree(pbl);
519}
520
521#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
522#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
523
524#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
525#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
526#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
527
528static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
529 struct qedr_pbl_info *pbl_info,
530 gfp_t flags)
531{
532 struct pci_dev *pdev = dev->pdev;
533 struct qedr_pbl *pbl_table;
534 dma_addr_t *pbl_main_tbl;
535 dma_addr_t pa;
536 void *va;
537 int i;
538
539 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
540 if (!pbl_table)
541 return ERR_PTR(-ENOMEM);
542
543 for (i = 0; i < pbl_info->num_pbls; i++) {
544 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
545 flags);
546 if (!va)
547 goto err;
548
549 pbl_table[i].va = va;
550 pbl_table[i].pa = pa;
551 }
552
553 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
554 * the first one with physical pointers to all of the rest
555 */
556 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
557 for (i = 0; i < pbl_info->num_pbls - 1; i++)
558 pbl_main_tbl[i] = pbl_table[i + 1].pa;
559
560 return pbl_table;
561
562err:
563 for (i--; i >= 0; i--)
564 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
565 pbl_table[i].va, pbl_table[i].pa);
566
567 qedr_free_pbl(dev, pbl_info, pbl_table);
568
569 return ERR_PTR(-ENOMEM);
570}
571
572static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
573 struct qedr_pbl_info *pbl_info,
574 u32 num_pbes, int two_layer_capable)
575{
576 u32 pbl_capacity;
577 u32 pbl_size;
578 u32 num_pbls;
579
580 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
581 if (num_pbes > MAX_PBES_TWO_LAYER) {
582 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
583 num_pbes);
584 return -EINVAL;
585 }
586
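 /* Capacity math for reference: a 4 KiB PBL page holds 512 u64 PBEs, so two
 * layers start at 512 * 512 = 262,144 addressable pages, while a single
 * 64 KiB page (MAX_PBES_ON_PAGE = 8192) is the one-layer limit checked
 * above. The loop below doubles the PBL page size until the two-layer
 * capacity covers num_pbes.
 */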
587 /* calculate required pbl page size */
588 pbl_size = MIN_FW_PBL_PAGE_SIZE;
589 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
590 NUM_PBES_ON_PAGE(pbl_size);
591
592 while (pbl_capacity < num_pbes) {
593 pbl_size *= 2;
594 pbl_capacity = pbl_size / sizeof(u64);
595 pbl_capacity = pbl_capacity * pbl_capacity;
596 }
597
598 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
599 num_pbls++; /* One extra for layer 0 (it points to the other pbls) */
600 pbl_info->two_layered = true;
601 } else {
602 /* One layered PBL */
603 num_pbls = 1;
604 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
605 roundup_pow_of_two((num_pbes * sizeof(u64))));
606 pbl_info->two_layered = false;
607 }
608
609 pbl_info->num_pbls = num_pbls;
610 pbl_info->pbl_size = pbl_size;
611 pbl_info->num_pbes = num_pbes;
612
613 DP_DEBUG(dev, QEDR_MSG_MR,
614 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
615 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
616
617 return 0;
618}
619
620static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
621 struct qedr_pbl *pbl,
622 struct qedr_pbl_info *pbl_info, u32 pg_shift)
623{
624 int pbe_cnt, total_num_pbes = 0;
625 struct qedr_pbl *pbl_tbl;
626 struct ib_block_iter biter;
627 struct regpair *pbe;
628
629 if (!pbl_info->num_pbes)
630 return;
631
632 /* If we have a two-layered pbl, the first pbl points to the rest
633 * of the pbls and the first entry lies in the second pbl of the table
634 */
635 if (pbl_info->two_layered)
636 pbl_tbl = &pbl[1];
637 else
638 pbl_tbl = pbl;
639
640 pbe = (struct regpair *)pbl_tbl->va;
641 if (!pbe) {
642 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
643 return;
644 }
645
646 pbe_cnt = 0;
647
648 rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
649 u64 pg_addr = rdma_block_iter_dma_address(&biter);
650
651 pbe->lo = cpu_to_le32(pg_addr);
652 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
653
654 pbe_cnt++;
655 total_num_pbes++;
656 pbe++;
657
658 if (total_num_pbes == pbl_info->num_pbes)
659 return;
660
661 /* If the given pbl is full storing the pbes, move to next pbl.
662 */
663 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
664 pbl_tbl++;
665 pbe = (struct regpair *)pbl_tbl->va;
666 pbe_cnt = 0;
667 }
668 }
669}
670
671static int qedr_db_recovery_add(struct qedr_dev *dev,
672 void __iomem *db_addr,
673 void *db_data,
674 enum qed_db_rec_width db_width,
675 enum qed_db_rec_space db_space)
676{
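 /* A NULL db_data means the user library predates doorbell recovery, so
 * there is nothing to register with the qed doorbell-recovery mechanism.
 */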
677 if (!db_data) {
678 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
679 return 0;
680 }
681
682 return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
683 db_width, db_space);
684}
685
686static void qedr_db_recovery_del(struct qedr_dev *dev,
687 void __iomem *db_addr,
688 void *db_data)
689{
690 if (!db_data) {
691 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
692 return;
693 }
694
695 /* Ignore return code as there is not much we can do about it. Error
696 * log will be printed inside.
697 */
698 dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
699}
700
701static int qedr_copy_cq_uresp(struct qedr_dev *dev,
702 struct qedr_cq *cq, struct ib_udata *udata,
703 u32 db_offset)
704{
705 struct qedr_create_cq_uresp uresp;
706 int rc;
707
708 memset(&uresp, 0, sizeof(uresp));
709
710 uresp.db_offset = db_offset;
711 uresp.icid = cq->icid;
712 if (cq->q.db_mmap_entry)
713 uresp.db_rec_addr =
714 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
715
716 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
717 if (rc)
718 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
719
720 return rc;
721}
722
723static void consume_cqe(struct qedr_cq *cq)
724{
725 if (cq->latest_cqe == cq->toggle_cqe)
726 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
727
728 cq->latest_cqe = qed_chain_consume(&cq->pbl);
729}
730
731static inline int qedr_align_cq_entries(int entries)
732{
733 u64 size, aligned_size;
734
735 /* We allocate an extra entry that we don't report to the FW. */
736 size = (entries + 1) * QEDR_CQE_SIZE;
737 aligned_size = ALIGN(size, PAGE_SIZE);
738
739 return aligned_size / QEDR_CQE_SIZE;
740}
741
742static int qedr_init_user_db_rec(struct ib_udata *udata,
743 struct qedr_dev *dev, struct qedr_userq *q,
744 bool requires_db_rec)
745{
746 struct qedr_ucontext *uctx =
747 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
748 ibucontext);
749 struct qedr_user_mmap_entry *entry;
750 int rc;
751
752 /* Abort for a non-doorbell user queue (SRQ) or a lib without doorbell recovery support */
753 if (requires_db_rec == 0 || !uctx->db_rec)
754 return 0;
755
756 /* Allocate a page for doorbell recovery, add to mmap */
757 q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
758 if (!q->db_rec_data) {
759 DP_ERR(dev, "get_zeroed_page failed\n");
760 return -ENOMEM;
761 }
762
763 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
764 if (!entry)
765 goto err_free_db_data;
766
767 entry->address = q->db_rec_data;
768 entry->length = PAGE_SIZE;
769 entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
770 rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
771 &entry->rdma_entry,
772 PAGE_SIZE);
773 if (rc)
774 goto err_free_entry;
775
776 q->db_mmap_entry = &entry->rdma_entry;
777
778 return 0;
779
780err_free_entry:
781 kfree(entry);
782
783err_free_db_data:
784 free_page((unsigned long)q->db_rec_data);
785 q->db_rec_data = NULL;
786 return -ENOMEM;
787}
788
789static inline int qedr_init_user_queue(struct ib_udata *udata,
790 struct qedr_dev *dev,
791 struct qedr_userq *q, u64 buf_addr,
792 size_t buf_len, bool requires_db_rec,
793 int access,
794 int alloc_and_init)
795{
796 u32 fw_pages;
797 int rc;
798
799 q->buf_addr = buf_addr;
800 q->buf_len = buf_len;
801 q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
802 if (IS_ERR(q->umem)) {
803 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
804 PTR_ERR(q->umem));
805 return PTR_ERR(q->umem);
806 }
807
808 fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
809 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
810 if (rc)
811 goto err0;
812
813 if (alloc_and_init) {
814 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
815 if (IS_ERR(q->pbl_tbl)) {
816 rc = PTR_ERR(q->pbl_tbl);
817 goto err0;
818 }
819 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
820 FW_PAGE_SHIFT);
821 } else {
822 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
823 if (!q->pbl_tbl) {
824 rc = -ENOMEM;
825 goto err0;
826 }
827 }
828
829 /* mmap the user address used to store doorbell data for recovery */
830 return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
831
832err0:
833 ib_umem_release(q->umem);
834 q->umem = NULL;
835
836 return rc;
837}
838
839static inline void qedr_init_cq_params(struct qedr_cq *cq,
840 struct qedr_ucontext *ctx,
841 struct qedr_dev *dev, int vector,
842 int chain_entries, int page_cnt,
843 u64 pbl_ptr,
844 struct qed_rdma_create_cq_in_params
845 *params)
846{
847 memset(params, 0, sizeof(*params));
848 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
849 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
850 params->cnq_id = vector;
851 params->cq_size = chain_entries - 1;
852 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
853 params->pbl_num_pages = page_cnt;
854 params->pbl_ptr = pbl_ptr;
855 params->pbl_two_level = 0;
856}
857
858static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
859{
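 /* Ring the CQ doorbell: db.raw carries both the aggregation flags and the
 * latest consumer index, written as a single 64-bit doorbell.
 */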
860 cq->db.data.agg_flags = flags;
861 cq->db.data.value = cpu_to_le32(cons);
862 writeq(cq->db.raw, cq->db_addr);
863}
864
865int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
866{
867 struct qedr_cq *cq = get_qedr_cq(ibcq);
868 unsigned long sflags;
869 struct qedr_dev *dev;
870
871 dev = get_qedr_dev(ibcq->device);
872
873 if (cq->destroyed) {
874 DP_ERR(dev,
875 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
876 cq, cq->icid);
877 return -EINVAL;
878 }
879
880
881 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
882 return 0;
883
884 spin_lock_irqsave(&cq->cq_lock, sflags);
885
886 cq->arm_flags = 0;
887
888 if (flags & IB_CQ_SOLICITED)
889 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
890
891 if (flags & IB_CQ_NEXT_COMP)
892 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
893
894 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
895
896 spin_unlock_irqrestore(&cq->cq_lock, sflags);
897
898 return 0;
899}
900
901int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
902 struct ib_udata *udata)
903{
904 struct ib_device *ibdev = ibcq->device;
905 struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
906 udata, struct qedr_ucontext, ibucontext);
907 struct qed_rdma_destroy_cq_out_params destroy_oparams;
908 struct qed_rdma_destroy_cq_in_params destroy_iparams;
909 struct qed_chain_init_params chain_params = {
910 .mode = QED_CHAIN_MODE_PBL,
911 .intended_use = QED_CHAIN_USE_TO_CONSUME,
912 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
913 .elem_size = sizeof(union rdma_cqe),
914 };
915 struct qedr_dev *dev = get_qedr_dev(ibdev);
916 struct qed_rdma_create_cq_in_params params;
917 struct qedr_create_cq_ureq ureq = {};
918 int vector = attr->comp_vector;
919 int entries = attr->cqe;
920 struct qedr_cq *cq = get_qedr_cq(ibcq);
921 int chain_entries;
922 u32 db_offset;
923 int page_cnt;
924 u64 pbl_ptr;
925 u16 icid;
926 int rc;
927
928 DP_DEBUG(dev, QEDR_MSG_INIT,
929 "create_cq: called from %s. entries=%d, vector=%d\n",
930 udata ? "User Lib" : "Kernel", entries, vector);
931
932 if (attr->flags)
933 return -EOPNOTSUPP;
934
935 if (entries > QEDR_MAX_CQES) {
936 DP_ERR(dev,
937 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
938 entries, QEDR_MAX_CQES);
939 return -EINVAL;
940 }
941
942 chain_entries = qedr_align_cq_entries(entries);
943 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
944 chain_params.num_elems = chain_entries;
945
946 /* calc db offset. user will add DPI base, kernel will add db addr */
947 db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
948
949 if (udata) {
950 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
951 udata->inlen))) {
952 DP_ERR(dev,
953 "create cq: problem copying data from user space\n");
954 goto err0;
955 }
956
957 if (!ureq.len) {
958 DP_ERR(dev,
959 "create cq: cannot create a cq with 0 entries\n");
960 goto err0;
961 }
962
963 cq->cq_type = QEDR_CQ_TYPE_USER;
964
965 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
966 ureq.len, true, IB_ACCESS_LOCAL_WRITE,
967 1);
968 if (rc)
969 goto err0;
970
971 pbl_ptr = cq->q.pbl_tbl->pa;
972 page_cnt = cq->q.pbl_info.num_pbes;
973
974 cq->ibcq.cqe = chain_entries;
975 cq->q.db_addr = ctx->dpi_addr + db_offset;
976 } else {
977 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
978
979 rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
980 &chain_params);
981 if (rc)
982 goto err0;
983
984 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
985 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
986 cq->ibcq.cqe = cq->pbl.capacity;
987 }
988
989 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
990 pbl_ptr, &params);
991
992 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
993 if (rc)
994 goto err1;
995
996 cq->icid = icid;
997 cq->sig = QEDR_CQ_MAGIC_NUMBER;
998 spin_lock_init(&cq->cq_lock);
999
1000 if (udata) {
1001 rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1002 if (rc)
1003 goto err2;
1004
1005 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1006 &cq->q.db_rec_data->db_data,
1007 DB_REC_WIDTH_64B,
1008 DB_REC_USER);
1009 if (rc)
1010 goto err2;
1011
1012 } else {
1013 /* Generate doorbell address. */
1014 cq->db.data.icid = cq->icid;
1015 cq->db_addr = dev->db_addr + db_offset;
1016 cq->db.data.params = DB_AGG_CMD_MAX <<
1017 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1018
1019 /* point to the very last element, passing it we will toggle */
1020 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1021 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1022 cq->latest_cqe = NULL;
1023 consume_cqe(cq);
1024 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1025
1026 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1027 DB_REC_WIDTH_64B, DB_REC_KERNEL);
1028 if (rc)
1029 goto err2;
1030 }
1031
1032 DP_DEBUG(dev, QEDR_MSG_CQ,
1033 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1034 cq->icid, cq, params.cq_size);
1035
1036 return 0;
1037
1038err2:
1039 destroy_iparams.icid = cq->icid;
1040 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1041 &destroy_oparams);
1042err1:
1043 if (udata) {
1044 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1045 ib_umem_release(cq->q.umem);
1046 if (cq->q.db_mmap_entry)
1047 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1048 } else {
1049 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1050 }
1051err0:
1052 return -EINVAL;
1053}
1054
1055int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1056{
1057 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1058 struct qedr_cq *cq = get_qedr_cq(ibcq);
1059
1060 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1061
1062 return 0;
1063}
1064
1065#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1066#define QEDR_DESTROY_CQ_ITER_DURATION (10)
1067
1068int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1069{
1070 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1071 struct qed_rdma_destroy_cq_out_params oparams;
1072 struct qed_rdma_destroy_cq_in_params iparams;
1073 struct qedr_cq *cq = get_qedr_cq(ibcq);
1074 int iter;
1075
1076 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1077
1078 cq->destroyed = 1;
1079
a7efd777 1080 /* GSIs CQs are handled by driver, so they don't exist in the FW */
1081 if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1082 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1083 return 0;
1084 }
1085
1086 iparams.icid = cq->icid;
1087 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1088 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1089
1090 if (udata) {
1091 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1092 ib_umem_release(cq->q.umem);
1093
1094 if (cq->q.db_rec_data) {
1095 qedr_db_recovery_del(dev, cq->q.db_addr,
1096 &cq->q.db_rec_data->db_data);
1097 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1098 }
1099 } else {
1100 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1101 }
1102
1103 /* We don't want the IRQ handler to handle a non-existing CQ so we
1104 * wait until all CNQ interrupts, if any, are received. This will always
1105 * happen and will always happen very fast. If not, then a serious error
1106 * has occurred. That is why we can use a long delay.
1107 * We spin for a short time so we don't lose time on context switching
1108 * in case all the completions are handled in that span. Otherwise
1109 * we sleep for a while and check again. Since the CNQ may be
1110 * associated with (only) the current CPU we use msleep to allow the
1111 * current CPU to be freed.
1112 * The CNQ notification is increased in qedr_irq_handler().
1113 */
1114 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1115 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1116 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1117 iter--;
1118 }
1119
1120 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1121 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1122 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1123 iter--;
1124 }
1125
1126 /* Note that we don't need to have explicit code to wait for the
1127 * completion of the event handler because it is invoked from the EQ.
1128 * Since the destroy CQ ramrod has also been received on the EQ we can
1129 * be certain that there's no event handler in process.
1130 */
1131 return 0;
1132}
1133
1134static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1135 struct ib_qp_attr *attr,
1136 int attr_mask,
1137 struct qed_rdma_modify_qp_in_params
1138 *qp_params)
1139{
1140 const struct ib_gid_attr *gid_attr;
1141 enum rdma_network_type nw_type;
1142 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1143 u32 ipv4_addr;
1144 int ret;
1145 int i;
1146
1147 gid_attr = grh->sgid_attr;
1148 ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1149 if (ret)
1150 return ret;
1151
1152 nw_type = rdma_gid_attr_network_type(gid_attr);
1153 switch (nw_type) {
1154 case RDMA_NETWORK_IPV6:
1155 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1156 sizeof(qp_params->sgid));
1157 memcpy(&qp_params->dgid.bytes[0],
1158 &grh->dgid,
1159 sizeof(qp_params->dgid));
1160 qp_params->roce_mode = ROCE_V2_IPV6;
1161 SET_FIELD(qp_params->modify_flags,
1162 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1163 break;
1164 case RDMA_NETWORK_ROCE_V1:
1165 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1166 sizeof(qp_params->sgid));
1167 memcpy(&qp_params->dgid.bytes[0],
1168 &grh->dgid,
1169 sizeof(qp_params->dgid));
1170 qp_params->roce_mode = ROCE_V1;
1171 break;
1172 case RDMA_NETWORK_IPV4:
1173 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1174 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1175 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1176 qp_params->sgid.ipv4_addr = ipv4_addr;
1177 ipv4_addr =
1178 qedr_get_ipv4_from_gid(grh->dgid.raw);
1179 qp_params->dgid.ipv4_addr = ipv4_addr;
1180 SET_FIELD(qp_params->modify_flags,
1181 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1182 qp_params->roce_mode = ROCE_V2_IPV4;
1183 break;
1184 default:
1185 return -EINVAL;
1186 }
1187
1188 for (i = 0; i < 4; i++) {
1189 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1190 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1191 }
1192
1193 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1194 qp_params->vlan_id = 0;
1195
1196 return 0;
1197}
1198
1199static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1200 struct ib_qp_init_attr *attrs,
1201 struct ib_udata *udata)
1202{
1203 struct qedr_device_attr *qattr = &dev->attr;
1204
1205 /* QP0... attrs->qp_type == IB_QPT_GSI */
1206 if (attrs->qp_type != IB_QPT_RC &&
1207 attrs->qp_type != IB_QPT_GSI &&
1208 attrs->qp_type != IB_QPT_XRC_INI &&
1209 attrs->qp_type != IB_QPT_XRC_TGT) {
1210 DP_DEBUG(dev, QEDR_MSG_QP,
1211 "create qp: unsupported qp type=0x%x requested\n",
1212 attrs->qp_type);
1213 return -EOPNOTSUPP;
1214 }
1215
1216 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1217 DP_ERR(dev,
1218 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1219 attrs->cap.max_send_wr, qattr->max_sqe);
1220 return -EINVAL;
1221 }
1222
1223 if (attrs->cap.max_inline_data > qattr->max_inline) {
1224 DP_ERR(dev,
1225 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1226 attrs->cap.max_inline_data, qattr->max_inline);
1227 return -EINVAL;
1228 }
1229
1230 if (attrs->cap.max_send_sge > qattr->max_sge) {
1231 DP_ERR(dev,
1232 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1233 attrs->cap.max_send_sge, qattr->max_sge);
1234 return -EINVAL;
1235 }
1236
1237 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1238 DP_ERR(dev,
1239 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1240 attrs->cap.max_recv_sge, qattr->max_sge);
1241 return -EINVAL;
1242 }
1243
1244 /* verify consumer QPs are not trying to use GSI QP's CQ.
1245 * TGT QP isn't associated with RQ/SQ
1246 */
1247 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1248 (attrs->qp_type != IB_QPT_XRC_TGT) &&
1249 (attrs->qp_type != IB_QPT_XRC_INI)) {
1250 struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1251 struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1252
1253 if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1254 (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1255 DP_ERR(dev,
1256 "create qp: consumer QP cannot use GSI CQs.\n");
1257 return -EINVAL;
1258 }
1259 }
1260
1261 return 0;
1262}
1263
1264static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1265 struct qedr_srq *srq, struct ib_udata *udata)
1266{
1267 struct qedr_create_srq_uresp uresp = {};
1268 int rc;
1269
1270 uresp.srq_id = srq->srq_id;
1271
1272 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1273 if (rc)
1274 DP_ERR(dev, "create srq: problem copying data to user space\n");
1275
1276 return rc;
1277}
1278
1279static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1280 struct qedr_create_qp_uresp *uresp,
1281 struct qedr_qp *qp)
1282{
1283 /* iWARP requires two doorbells per RQ. */
1284 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1285 uresp->rq_db_offset =
1286 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1287 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1288 } else {
1289 uresp->rq_db_offset =
1290 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1291 }
1292
1293 uresp->rq_icid = qp->icid;
1294 if (qp->urq.db_mmap_entry)
1295 uresp->rq_db_rec_addr =
1296 rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1297}
1298
1299static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1300 struct qedr_create_qp_uresp *uresp,
1301 struct qedr_qp *qp)
1302{
1303 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1304
1305 /* iWARP uses the same cid for rq and sq */
1306 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1307 uresp->sq_icid = qp->icid;
1308 else
1309 uresp->sq_icid = qp->icid + 1;
1310
1311 if (qp->usq.db_mmap_entry)
1312 uresp->sq_db_rec_addr =
1313 rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1314}
1315
1316static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1317 struct qedr_qp *qp, struct ib_udata *udata,
1318 struct qedr_create_qp_uresp *uresp)
cecbcddf 1319{
1320 int rc;
1321
1322 memset(uresp, 0, sizeof(*uresp));
1323
1324 if (qedr_qp_has_sq(qp))
1325 qedr_copy_sq_uresp(dev, uresp, qp);
1326
1327 if (qedr_qp_has_rq(qp))
1328 qedr_copy_rq_uresp(dev, uresp, qp);
1329
1330 uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1331 uresp->qp_id = qp->qp_id;
1332
1333 rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1334 if (rc)
1335 DP_ERR(dev,
1336 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1337 qp->icid);
1338
1339 return rc;
1340}
1341
1342static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1343{
1344 qed_chain_reset(&qph->pbl);
1345 qph->prod = 0;
1346 qph->cons = 0;
1347 qph->wqe_cons = 0;
1348 qph->db_data.data.value = cpu_to_le16(0);
1349}
1350
1351static void qedr_set_common_qp_params(struct qedr_dev *dev,
1352 struct qedr_qp *qp,
1353 struct qedr_pd *pd,
1354 struct ib_qp_init_attr *attrs)
1355
1356 spin_lock_init(&qp->q_lock);
1357 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1358 kref_init(&qp->refcnt);
1359 init_completion(&qp->iwarp_cm_comp);
1360 init_completion(&qp->qp_rel_comp);
1361 }
1362
1363 qp->pd = pd;
1364 qp->qp_type = attrs->qp_type;
1365 qp->max_inline_data = attrs->cap.max_inline_data;
1366 qp->state = QED_ROCE_QP_STATE_RESET;
1367
1368 qp->prev_wqe_size = 0;
1369
1370 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1371 qp->dev = dev;
1372 if (qedr_qp_has_sq(qp)) {
1373 qedr_reset_qp_hwq_info(&qp->sq);
1374 qp->sq.max_sges = attrs->cap.max_send_sge;
1375 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1376 DP_DEBUG(dev, QEDR_MSG_QP,
1377 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1378 qp->sq.max_sges, qp->sq_cq->icid);
1379 }
1380
1381 if (attrs->srq)
1382 qp->srq = get_qedr_srq(attrs->srq);
1383
1384 if (qedr_qp_has_rq(qp)) {
1385 qedr_reset_qp_hwq_info(&qp->rq);
1386 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1387 qp->rq.max_sges = attrs->cap.max_recv_sge;
1388 DP_DEBUG(dev, QEDR_MSG_QP,
1389 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1390 qp->rq.max_sges, qp->rq_cq->icid);
1391 }
1392
1393 DP_DEBUG(dev, QEDR_MSG_QP,
1394 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1395 pd->pd_id, qp->qp_type, qp->max_inline_data,
1396 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1397 DP_DEBUG(dev, QEDR_MSG_QP,
1398 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1399 qp->sq.max_sges, qp->sq_cq->icid);
1400}
1401
1402static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1403{
1404 int rc = 0;
1405
1406 if (qedr_qp_has_sq(qp)) {
1407 qp->sq.db = dev->db_addr +
1408 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1409 qp->sq.db_data.data.icid = qp->icid + 1;
1410 rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1411 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1412 if (rc)
1413 return rc;
1414 }
1415
1416 if (qedr_qp_has_rq(qp)) {
1417 qp->rq.db = dev->db_addr +
1418 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1419 qp->rq.db_data.data.icid = qp->icid;
06e8d1df
YB
1420 rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1421 DB_REC_WIDTH_32B, DB_REC_KERNEL);
1422 if (rc && qedr_qp_has_sq(qp))
1423 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
3491c9e7 1424 }
97f61250
MK
1425
1426 return rc;
1427}
1428
1429static int qedr_check_srq_params(struct qedr_dev *dev,
1430 struct ib_srq_init_attr *attrs,
1431 struct ib_udata *udata)
1432{
1433 struct qedr_device_attr *qattr = &dev->attr;
1434
1435 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1436 DP_ERR(dev,
1437 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1438 attrs->attr.max_wr, qattr->max_srq_wr);
1439 return -EINVAL;
1440 }
1441
1442 if (attrs->attr.max_sge > qattr->max_sge) {
1443 DP_ERR(dev,
1444 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1445 attrs->attr.max_sge, qattr->max_sge);
1446 }
1447
1448 if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1449 DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1450 return -EINVAL;
1451 }
1452
1453 return 0;
1454}
1455
1456static void qedr_free_srq_user_params(struct qedr_srq *srq)
1457{
1458 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1459 ib_umem_release(srq->usrq.umem);
1460 ib_umem_release(srq->prod_umem);
1461}
1462
1463static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1464{
1465 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1466 struct qedr_dev *dev = srq->dev;
1467
1468 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1469
1470 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1471 hw_srq->virt_prod_pair_addr,
1472 hw_srq->phy_prod_pair_addr);
1473}
1474
1475static int qedr_init_srq_user_params(struct ib_udata *udata,
1476 struct qedr_srq *srq,
1477 struct qedr_create_srq_ureq *ureq,
1478 int access)
1479{
1480 struct scatterlist *sg;
1481 int rc;
1482
1483 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1484 ureq->srq_len, false, access, 1);
1485 if (rc)
1486 return rc;
1487
1488 srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1489 sizeof(struct rdma_srq_producers), access);
1490 if (IS_ERR(srq->prod_umem)) {
1491 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1492 ib_umem_release(srq->usrq.umem);
1493 DP_ERR(srq->dev,
1494 "create srq: failed ib_umem_get for producer, got %ld\n",
1495 PTR_ERR(srq->prod_umem));
1496 return PTR_ERR(srq->prod_umem);
1497 }
1498
1499 sg = srq->prod_umem->sgt_append.sgt.sgl;
1500 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1501
1502 return 0;
1503}
1504
1505static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1506 struct qedr_dev *dev,
1507 struct ib_srq_init_attr *init_attr)
1508{
1509 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1510 struct qed_chain_init_params params = {
1511 .mode = QED_CHAIN_MODE_PBL,
1512 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1513 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1514 .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1515 };
1516 dma_addr_t phy_prod_pair_addr;
1517 u32 num_elems;
1518 void *va;
1519 int rc;
1520
1521 va = dma_alloc_coherent(&dev->pdev->dev,
1522 sizeof(struct rdma_srq_producers),
1523 &phy_prod_pair_addr, GFP_KERNEL);
1524 if (!va) {
1525 DP_ERR(dev,
1526 "create srq: failed to allocate dma memory for producer\n");
1527 return -ENOMEM;
1528 }
1529
1530 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1531 hw_srq->virt_prod_pair_addr = va;
1532
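 /* The producer pair is a small DMA-coherent area (struct
 * rdma_srq_producers) shared with the device; its bus address is handed to
 * the FW as prod_pair_addr when the SRQ is created below.
 */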
1533 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1534 params.num_elems = num_elems;
1535
1536 rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1537 if (rc)
1538 goto err0;
1539
1540 hw_srq->num_elems = num_elems;
1541
1542 return 0;
1543
1544err0:
1545 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1546 va, phy_prod_pair_addr);
1547 return rc;
1548}
1549
1550int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1551 struct ib_udata *udata)
1552{
1553 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1554 struct qed_rdma_create_srq_in_params in_params = {};
1555 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1556 struct qed_rdma_create_srq_out_params out_params;
1557 struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1558 struct qedr_create_srq_ureq ureq = {};
1559 u64 pbl_base_addr, phy_prod_pair_addr;
1560 struct qedr_srq_hwq_info *hw_srq;
1561 u32 page_cnt, page_size;
1562 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1563 int rc = 0;
1564
1565 DP_DEBUG(dev, QEDR_MSG_QP,
1566 "create SRQ called from %s (pd %p)\n",
1567 (udata) ? "User lib" : "kernel", pd);
1568
1569 if (init_attr->srq_type != IB_SRQT_BASIC &&
1570 init_attr->srq_type != IB_SRQT_XRC)
1571 return -EOPNOTSUPP;
1572
1573 rc = qedr_check_srq_params(dev, init_attr, udata);
1574 if (rc)
1575 return -EINVAL;
1576
1577 srq->dev = dev;
1578 srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1579 hw_srq = &srq->hw_srq;
1580 spin_lock_init(&srq->lock);
1581
1582 hw_srq->max_wr = init_attr->attr.max_wr;
1583 hw_srq->max_sges = init_attr->attr.max_sge;
1584
1585 if (udata) {
1586 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1587 udata->inlen))) {
1588 DP_ERR(dev,
1589 "create srq: problem copying data from user space\n");
1590 goto err0;
1591 }
1592
1593 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1594 if (rc)
1595 goto err0;
1596
1597 page_cnt = srq->usrq.pbl_info.num_pbes;
1598 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1599 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1600 page_size = PAGE_SIZE;
1601 } else {
1602 struct qed_chain *pbl;
1603
1604 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1605 if (rc)
1606 goto err0;
1607
1608 pbl = &hw_srq->pbl;
1609 page_cnt = qed_chain_get_page_cnt(pbl);
1610 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1611 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1612 page_size = QED_CHAIN_PAGE_SIZE;
1613 }
1614
1615 in_params.pd_id = pd->pd_id;
1616 in_params.pbl_base_addr = pbl_base_addr;
1617 in_params.prod_pair_addr = phy_prod_pair_addr;
1618 in_params.num_pages = page_cnt;
1619 in_params.page_size = page_size;
1620 if (srq->is_xrc) {
1621 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1622 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1623
1624 in_params.is_xrc = 1;
1625 in_params.xrcd_id = xrcd->xrcd_id;
1626 in_params.cq_cid = cq->icid;
1627 }
1628
1629 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1630 if (rc)
1631 goto err1;
1632
1633 srq->srq_id = out_params.srq_id;
1634
1635 if (udata) {
1636 rc = qedr_copy_srq_uresp(dev, srq, udata);
1637 if (rc)
1638 goto err2;
1639 }
1640
1641 rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1642 if (rc)
1643 goto err2;
1644
1645 DP_DEBUG(dev, QEDR_MSG_SRQ,
1646 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1647 return 0;
1648
1649err2:
1650 destroy_in_params.srq_id = srq->srq_id;
1651
1652 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1653err1:
1654 if (udata)
1655 qedr_free_srq_user_params(srq);
1656 else
1657 qedr_free_srq_kernel_params(srq);
1658err0:
1659 return -EFAULT;
1660}
1661
1662int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1663{
1664 struct qed_rdma_destroy_srq_in_params in_params = {};
1665 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1666 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1667
1668 xa_erase_irq(&dev->srqs, srq->srq_id);
1669 in_params.srq_id = srq->srq_id;
1670 in_params.is_xrc = srq->is_xrc;
1671 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1672
1673 if (ibsrq->uobject)
1674 qedr_free_srq_user_params(srq);
1675 else
1676 qedr_free_srq_kernel_params(srq);
1677
1678 DP_DEBUG(dev, QEDR_MSG_SRQ,
1679 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1680 srq->srq_id);
1681 return 0;
1682}
1683
1684int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1685 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1686{
1687 struct qed_rdma_modify_srq_in_params in_params = {};
1688 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1689 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1690 int rc;
1691
1692 if (attr_mask & IB_SRQ_MAX_WR) {
1693 DP_ERR(dev,
1694 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1695 attr_mask, srq);
1696 return -EINVAL;
1697 }
1698
1699 if (attr_mask & IB_SRQ_LIMIT) {
1700 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1701 DP_ERR(dev,
1702 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1703 attr->srq_limit, srq->hw_srq.max_wr);
1704 return -EINVAL;
1705 }
1706
1707 in_params.srq_id = srq->srq_id;
1708 in_params.wqe_limit = attr->srq_limit;
1709 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1710 if (rc)
1711 return rc;
1712 }
1713
1714 srq->srq_limit = attr->srq_limit;
1715
1716 DP_DEBUG(dev, QEDR_MSG_SRQ,
1717 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1718
1719 return 0;
1720}
1721
1722static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1723{
1724 switch (ib_qp_type) {
1725 case IB_QPT_RC:
1726 return QED_RDMA_QP_TYPE_RC;
1727 case IB_QPT_XRC_INI:
1728 return QED_RDMA_QP_TYPE_XRC_INI;
1729 case IB_QPT_XRC_TGT:
1730 return QED_RDMA_QP_TYPE_XRC_TGT;
1731 default:
1732 return QED_RDMA_QP_TYPE_INVAL;
1733 }
1734}
1735
1736static inline void
1737qedr_init_common_qp_in_params(struct qedr_dev *dev,
1738 struct qedr_pd *pd,
1739 struct qedr_qp *qp,
1740 struct ib_qp_init_attr *attrs,
1741 bool fmr_and_reserved_lkey,
1742 struct qed_rdma_create_qp_in_params *params)
1743{
1744 /* QP handle to be written in an async event */
1745 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1746 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
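 /* The split qedr_qp pointer is echoed back by the FW in affiliated async
 * events, letting the event handler recover the QP from this 64-bit handle.
 */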
1747
1748 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1749 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1750 params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1751 params->stats_queue = 0;
1752
1753 if (pd) {
1754 params->pd = pd->pd_id;
1755 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1756 }
1757
1758 if (qedr_qp_has_sq(qp))
1759 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1760
1761 if (qedr_qp_has_rq(qp))
3491c9e7
YB
1762 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1763
06e8d1df 1764 if (qedr_qp_has_srq(qp)) {
3491c9e7
YB
1765 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1766 params->srq_id = qp->srq->srq_id;
1767 params->use_srq = true;
06e8d1df
YB
1768 } else {
1769 params->srq_id = 0;
1770 params->use_srq = false;
3491c9e7 1771 }
cecbcddf
RA
1772}
1773
df158561 1774static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
cecbcddf 1775{
df158561
AR
1776 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1777 "qp=%p. "
1778 "sq_addr=0x%llx, "
1779 "sq_len=%zd, "
1780 "rq_addr=0x%llx, "
1781 "rq_len=%zd"
1782 "\n",
1783 qp,
06e8d1df
YB
1784 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1785 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1786 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1787 qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
df158561 1788}
cecbcddf 1789
69ad0e7f
KM
1790static inline void
1791qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1792 struct qedr_qp *qp,
1793 struct qed_rdma_create_qp_out_params *out_params)
1794{
1795 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1796 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1797
1798 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1799 &qp->usq.pbl_info, FW_PAGE_SHIFT);
40b173dd
YB
1800 if (!qp->srq) {
1801 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1802 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1803 }
69ad0e7f
KM
1804
1805 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1806 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1807}
1808
97f61250
MK
1809static void qedr_cleanup_user(struct qedr_dev *dev,
1810 struct qedr_ucontext *ctx,
1811 struct qedr_qp *qp)
df158561 1812{
06e8d1df
YB
1813 if (qedr_qp_has_sq(qp)) {
1814 ib_umem_release(qp->usq.umem);
1815 qp->usq.umem = NULL;
1816 }
cecbcddf 1817
06e8d1df
YB
1818 if (qedr_qp_has_rq(qp)) {
1819 ib_umem_release(qp->urq.umem);
1820 qp->urq.umem = NULL;
1821 }
24e412c1
MK
1822
1823 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1824 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1825 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1826 } else {
1827 kfree(qp->usq.pbl_tbl);
1828 kfree(qp->urq.pbl_tbl);
1829 }
97f61250
MK
1830
1831 if (qp->usq.db_rec_data) {
1832 qedr_db_recovery_del(dev, qp->usq.db_addr,
1833 &qp->usq.db_rec_data->db_data);
1834 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1835 }
1836
1837 if (qp->urq.db_rec_data) {
1838 qedr_db_recovery_del(dev, qp->urq.db_addr,
1839 &qp->urq.db_rec_data->db_data);
1840 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1841 }
b4bc7660
MK
1842
1843 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1844 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1845 &qp->urq.db_rec_db2_data);
cecbcddf
RA
1846}
1847
df158561
AR
1848static int qedr_create_user_qp(struct qedr_dev *dev,
1849 struct qedr_qp *qp,
1850 struct ib_pd *ibpd,
1851 struct ib_udata *udata,
1852 struct ib_qp_init_attr *attrs)
cecbcddf 1853{
df158561
AR
1854 struct qed_rdma_create_qp_in_params in_params;
1855 struct qed_rdma_create_qp_out_params out_params;
06e8d1df
YB
1856 struct qedr_create_qp_uresp uresp = {};
1857 struct qedr_create_qp_ureq ureq = {};
69ad0e7f 1858 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
06e8d1df
YB
1859 struct qedr_ucontext *ctx = NULL;
1860 struct qedr_pd *pd = NULL;
1861 int rc = 0;
cecbcddf 1862
82af6d19 1863 qp->create_type = QEDR_QP_CREATE_USER;
06e8d1df
YB
1864
1865 if (ibpd) {
1866 pd = get_qedr_pd(ibpd);
1867 ctx = pd->uctx;
df158561 1868 }
cecbcddf 1869
06e8d1df
YB
1870 if (udata) {
1871 rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1872 udata->inlen));
1873 if (rc) {
1874 DP_ERR(dev, "Problem copying data from user space\n");
1875 return rc;
1876 }
1877 }
cecbcddf 1878
06e8d1df
YB
1879 if (qedr_qp_has_sq(qp)) {
1880 /* SQ - read access only (0) */
1881 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1882 ureq.sq_len, true, 0, alloc_and_init);
1883 if (rc)
1884 return rc;
1885 }
1886
1887 if (qedr_qp_has_rq(qp)) {
72b894b0 1888 /* RQ - read access only (0) */
b0ea0fa5 1889 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
72b894b0 1890 ureq.rq_len, true, 0, alloc_and_init);
40b173dd
YB
1891 if (rc)
1892 return rc;
1893 }
df158561
AR
1894
1895 memset(&in_params, 0, sizeof(in_params));
1896 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1897 in_params.qp_handle_lo = ureq.qp_handle_lo;
1898 in_params.qp_handle_hi = ureq.qp_handle_hi;
06e8d1df
YB
1899
1900 if (qp->qp_type == IB_QPT_XRC_TGT) {
1901 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1902
1903 in_params.xrcd_id = xrcd->xrcd_id;
1904 in_params.qp_handle_lo = qp->qp_id;
1905 in_params.use_srq = 1;
1906 }
1907
1908 if (qedr_qp_has_sq(qp)) {
1909 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1910 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1911 }
1912
1913 if (qedr_qp_has_rq(qp)) {
40b173dd
YB
1914 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1915 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1916 }
df158561 1917
bbe4f424
MK
1918 if (ctx)
1919 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1920
df158561
AR
1921 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1922 &in_params, &out_params);
1923
1924 if (!qp->qed_qp) {
1925 rc = -ENOMEM;
1926 goto err1;
1927 }
1928
69ad0e7f
KM
1929 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1930 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1931
df158561
AR
1932 qp->qp_id = out_params.qp_id;
1933 qp->icid = out_params.icid;
1934
06e8d1df
YB
1935 if (udata) {
1936 rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1937 if (rc)
1938 goto err;
1939 }
97f61250
MK
1940
1941 /* db offset was calculated in copy_qp_uresp, now set it in the user queue */
06e8d1df
YB
1942 if (qedr_qp_has_sq(qp)) {
1943 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
88f16d91 1944 qp->sq.max_wr = attrs->cap.max_send_wr;
06e8d1df
YB
1945 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1946 &qp->usq.db_rec_data->db_data,
1947 DB_REC_WIDTH_32B,
1948 DB_REC_USER);
1949 if (rc)
1950 goto err;
b4bc7660
MK
1951 }
1952
06e8d1df
YB
1953 if (qedr_qp_has_rq(qp)) {
1954 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
88f16d91 1955 qp->rq.max_wr = attrs->cap.max_recv_wr;
06e8d1df
YB
1956 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1957 &qp->urq.db_rec_data->db_data,
1958 DB_REC_WIDTH_32B,
1959 DB_REC_USER);
1960 if (rc)
1961 goto err;
1962 }
df158561 1963
b4bc7660 1964 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
0191c271
AP
1965 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1966
1967 /* Calculate the db_rec_db2 data here since it is constant, so there
1968 * is no need to pass it in from user space.
1969 */
1970 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1971 qp->urq.db_rec_db2_data.data.value =
1972 cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1973
b4bc7660
MK
1974 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1975 &qp->urq.db_rec_db2_data,
1976 DB_REC_WIDTH_32B,
1977 DB_REC_USER);
1978 if (rc)
1979 goto err;
1980 }
df158561 1981 qedr_qp_user_print(dev, qp);
97f61250 1982 return rc;
df158561
AR
1983err:
1984 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1985 if (rc)
1986 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1987
1988err1:
97f61250 1989 qedr_cleanup_user(dev, ctx, qp);
df158561 1990 return rc;
cecbcddf
RA
1991}
1992
97f61250 1993static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
f5b1b177 1994{
97f61250
MK
1995 int rc;
1996
f5b1b177
KM
1997 qp->sq.db = dev->db_addr +
1998 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1999 qp->sq.db_data.data.icid = qp->icid;
2000
97f61250
MK
2001 rc = qedr_db_recovery_add(dev, qp->sq.db,
2002 &qp->sq.db_data,
2003 DB_REC_WIDTH_32B,
2004 DB_REC_KERNEL);
2005 if (rc)
2006 return rc;
2007
f5b1b177
KM
2008 qp->rq.db = dev->db_addr +
2009 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2010 qp->rq.db_data.data.icid = qp->icid;
2011 qp->rq.iwarp_db2 = dev->db_addr +
2012 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2013 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2014 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
97f61250
MK
2015
2016 rc = qedr_db_recovery_add(dev, qp->rq.db,
2017 &qp->rq.db_data,
2018 DB_REC_WIDTH_32B,
2019 DB_REC_KERNEL);
b4bc7660
MK
2020 if (rc)
2021 return rc;
2022
2023 rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2024 &qp->rq.iwarp_db2_data,
2025 DB_REC_WIDTH_32B,
2026 DB_REC_KERNEL);
97f61250 2027 return rc;
f5b1b177
KM
2028}
2029
df158561
AR
2030static int
2031qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2032 struct qedr_qp *qp,
2033 struct qed_rdma_create_qp_in_params *in_params,
2034 u32 n_sq_elems, u32 n_rq_elems)
cecbcddf 2035{
df158561 2036 struct qed_rdma_create_qp_out_params out_params;
b6db3f71
AL
2037 struct qed_chain_init_params params = {
2038 .mode = QED_CHAIN_MODE_PBL,
2039 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2040 };
cecbcddf
RA
2041 int rc;
2042
b6db3f71
AL
2043 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2044 params.num_elems = n_sq_elems;
2045 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
cecbcddf 2046
b6db3f71 2047 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
df158561
AR
2048 if (rc)
2049 return rc;
cecbcddf 2050
df158561
AR
2051 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2052 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
cecbcddf 2053
b6db3f71 2054 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
35406697 2055 params.num_elems = n_rq_elems;
b6db3f71
AL
2056 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2057
2058 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
df158561
AR
2059 if (rc)
2060 return rc;
cecbcddf 2061
df158561
AR
2062 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2063 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
cecbcddf 2064
df158561
AR
2065 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2066 in_params, &out_params);
cecbcddf 2067
df158561
AR
2068 if (!qp->qed_qp)
2069 return -EINVAL;
cecbcddf 2070
df158561
AR
2071 qp->qp_id = out_params.qp_id;
2072 qp->icid = out_params.icid;
cecbcddf 2073
97f61250 2074 return qedr_set_roce_db_info(dev, qp);
f5b1b177 2075}
cecbcddf 2076
f5b1b177
KM
2077static int
2078qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2079 struct qedr_qp *qp,
2080 struct qed_rdma_create_qp_in_params *in_params,
2081 u32 n_sq_elems, u32 n_rq_elems)
2082{
2083 struct qed_rdma_create_qp_out_params out_params;
b6db3f71
AL
2084 struct qed_chain_init_params params = {
2085 .mode = QED_CHAIN_MODE_PBL,
2086 .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2087 };
f5b1b177
KM
2088 int rc;
2089
2090 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2091 QEDR_SQE_ELEMENT_SIZE,
15506586 2092 QED_CHAIN_PAGE_SIZE,
f5b1b177
KM
2093 QED_CHAIN_MODE_PBL);
2094 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2095 QEDR_RQE_ELEMENT_SIZE,
15506586 2096 QED_CHAIN_PAGE_SIZE,
f5b1b177
KM
2097 QED_CHAIN_MODE_PBL);
2098
2099 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2100 in_params, &out_params);
2101
2102 if (!qp->qed_qp)
2103 return -EINVAL;
2104
2105 /* Now we allocate the chain */
f5b1b177 2106
b6db3f71
AL
2107 params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2108 params.num_elems = n_sq_elems;
2109 params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2110 params.ext_pbl_virt = out_params.sq_pbl_virt;
2111 params.ext_pbl_phys = out_params.sq_pbl_phys;
f5b1b177 2112
b6db3f71 2113 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
f5b1b177
KM
2114 if (rc)
2115 goto err;
2116
b6db3f71
AL
2117 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2118 params.num_elems = n_rq_elems;
2119 params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2120 params.ext_pbl_virt = out_params.rq_pbl_virt;
2121 params.ext_pbl_phys = out_params.rq_pbl_phys;
f5b1b177 2122
b6db3f71 2123 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
f5b1b177
KM
2124 if (rc)
2125 goto err;
2126
2127 qp->qp_id = out_params.qp_id;
2128 qp->icid = out_params.icid;
2129
97f61250 2130 return qedr_set_iwarp_db_info(dev, qp);
f5b1b177
KM
2131
2132err:
2133 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2134
2135 return rc;
cecbcddf
RA
2136}
2137
df158561 2138static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
cecbcddf 2139{
df158561
AR
2140 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2141 kfree(qp->wqe_wr_id);
cecbcddf 2142
df158561
AR
2143 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2144 kfree(qp->rqe_wr_id);
97f61250
MK
2145
2146 /* GSI qp is not registered to db mechanism so no need to delete */
2147 if (qp->qp_type == IB_QPT_GSI)
2148 return;
2149
2150 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2151
b4bc7660 2152 if (!qp->srq) {
97f61250 2153 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
b4bc7660
MK
2154
2155 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2156 qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2157 &qp->rq.iwarp_db2_data);
2158 }
cecbcddf
RA
2159}
2160
df158561
AR
2161static int qedr_create_kernel_qp(struct qedr_dev *dev,
2162 struct qedr_qp *qp,
2163 struct ib_pd *ibpd,
2164 struct ib_qp_init_attr *attrs)
cecbcddf 2165{
df158561
AR
2166 struct qed_rdma_create_qp_in_params in_params;
2167 struct qedr_pd *pd = get_qedr_pd(ibpd);
2168 int rc = -EINVAL;
2169 u32 n_rq_elems;
2170 u32 n_sq_elems;
2171 u32 n_sq_entries;
cecbcddf 2172
df158561 2173 memset(&in_params, 0, sizeof(in_params));
82af6d19 2174 qp->create_type = QEDR_QP_CREATE_KERNEL;
cecbcddf 2175
df158561
AR
2176 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2177 * the ring. The ring should allow at least a single WR, even if the
2178 * user requested none, due to allocation issues.
2179 * We should add an extra WR since the prod and cons indices of
2180 * wqe_wr_id are managed in such a way that the WQ is considered full
2181 * when (prod+1)%max_wr==cons. We currently don't do that because we
2182 * double the number of entries due to an iSER issue that pushes far more
2183 * WRs than indicated. If we decline its ib_post_send() then we get
2184 * error prints in the dmesg we'd like to avoid.
2185 */
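/* Editor's illustration (added, not in the original source): with
 * max_wr == 4 and cons == 0, the check (prod + 1) % max_wr == cons treats
 * the ring as full once prod == 3, so only max_wr - 1 WRs can actually be
 * outstanding; that is the extra entry the comment above refers to.
 */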
2186 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2187 dev->attr.max_sqe);
cecbcddf 2188
6396bb22 2189 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
df158561
AR
2190 GFP_KERNEL);
2191 if (!qp->wqe_wr_id) {
2192 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2193 return -ENOMEM;
2194 }
cecbcddf 2195
df158561
AR
2196 /* QP handle to be written in CQE */
2197 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2198 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
cecbcddf 2199
df158561
AR
2200 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2201 * the ring. The ring should allow at least a single WR, even if the
2202 * user requested none, due to allocation issues.
2203 */
2204 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
cecbcddf 2205
df158561 2206 /* Allocate driver internal RQ array */
6396bb22 2207 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
df158561
AR
2208 GFP_KERNEL);
2209 if (!qp->rqe_wr_id) {
2210 DP_ERR(dev,
2211 "create qp: failed RQ shadow memory allocation\n");
2212 kfree(qp->wqe_wr_id);
2213 return -ENOMEM;
cecbcddf
RA
2214 }
2215
df158561 2216 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
cecbcddf 2217
df158561
AR
2218 n_sq_entries = attrs->cap.max_send_wr;
2219 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2220 n_sq_entries = max_t(u32, n_sq_entries, 1);
2221 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
cecbcddf 2222
df158561
AR
2223 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2224
f5b1b177
KM
2225 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2226 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2227 n_sq_elems, n_rq_elems);
2228 else
2229 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2230 n_sq_elems, n_rq_elems);
df158561
AR
2231 if (rc)
2232 qedr_cleanup_kernel(dev, qp);
cecbcddf
RA
2233
2234 return rc;
2235}
2236
3e45410f
KS
2237static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2238 struct ib_udata *udata)
2239{
2240 struct qedr_ucontext *ctx =
2241 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2242 ibucontext);
2243 int rc;
2244
2245 if (qp->qp_type != IB_QPT_GSI) {
2246 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2247 if (rc)
2248 return rc;
2249 }
2250
2251 if (qp->create_type == QEDR_QP_CREATE_USER)
2252 qedr_cleanup_user(dev, ctx, qp);
2253 else
2254 qedr_cleanup_kernel(dev, qp);
2255
2256 return 0;
2257}
2258
514aee66
LR
2259int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2260 struct ib_udata *udata)
cecbcddf 2261{
06e8d1df 2262 struct qedr_xrcd *xrcd = NULL;
514aee66
LR
2263 struct ib_pd *ibpd = ibqp->pd;
2264 struct qedr_pd *pd = get_qedr_pd(ibpd);
2265 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2266 struct qedr_qp *qp = get_qedr_qp(ibqp);
cecbcddf
RA
2267 int rc = 0;
2268
1f11a761 2269 if (attrs->create_flags)
514aee66 2270 return -EOPNOTSUPP;
1f11a761 2271
514aee66 2272 if (attrs->qp_type == IB_QPT_XRC_TGT)
06e8d1df 2273 xrcd = get_qedr_xrcd(attrs->xrcd);
514aee66 2274 else
06e8d1df 2275 pd = get_qedr_pd(ibpd);
06e8d1df 2276
cecbcddf
RA
2277 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2278 udata ? "user library" : "kernel", pd);
2279
e00b64f7 2280 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
cecbcddf 2281 if (rc)
514aee66 2282 return rc;
cecbcddf 2283
cecbcddf 2284 DP_DEBUG(dev, QEDR_MSG_QP,
df158561
AR
2285 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2286 udata ? "user library" : "kernel", attrs->event_handler, pd,
cecbcddf
RA
2287 get_qedr_cq(attrs->send_cq),
2288 get_qedr_cq(attrs->send_cq)->icid,
2289 get_qedr_cq(attrs->recv_cq),
3491c9e7 2290 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
cecbcddf 2291
df158561 2292 qedr_set_common_qp_params(dev, qp, pd, attrs);
cecbcddf 2293
514aee66
LR
2294 if (attrs->qp_type == IB_QPT_GSI)
2295 return qedr_create_gsi_qp(dev, attrs, qp);
04886779 2296
06e8d1df 2297 if (udata || xrcd)
df158561
AR
2298 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2299 else
2300 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
cecbcddf 2301
df158561 2302 if (rc)
514aee66 2303 return rc;
cecbcddf 2304
cecbcddf
RA
2305 qp->ibqp.qp_num = qp->qp_id;
2306
1212767e 2307 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
5fdff18b 2308 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
1212767e 2309 if (rc)
3e45410f 2310 goto out_free_qp_resources;
1212767e 2311 }
de0089e6 2312
514aee66 2313 return 0;
cecbcddf 2314
3e45410f
KS
2315out_free_qp_resources:
2316 qedr_free_qp_resources(dev, qp, udata);
514aee66 2317 return -EFAULT;
cecbcddf
RA
2318}
2319
27a4b1a6 2320static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
cecbcddf
RA
2321{
2322 switch (qp_state) {
2323 case QED_ROCE_QP_STATE_RESET:
2324 return IB_QPS_RESET;
2325 case QED_ROCE_QP_STATE_INIT:
2326 return IB_QPS_INIT;
2327 case QED_ROCE_QP_STATE_RTR:
2328 return IB_QPS_RTR;
2329 case QED_ROCE_QP_STATE_RTS:
2330 return IB_QPS_RTS;
2331 case QED_ROCE_QP_STATE_SQD:
2332 return IB_QPS_SQD;
2333 case QED_ROCE_QP_STATE_ERR:
2334 return IB_QPS_ERR;
2335 case QED_ROCE_QP_STATE_SQE:
2336 return IB_QPS_SQE;
2337 }
2338 return IB_QPS_ERR;
2339}
2340
27a4b1a6
RA
2341static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2342 enum ib_qp_state qp_state)
cecbcddf
RA
2343{
2344 switch (qp_state) {
2345 case IB_QPS_RESET:
2346 return QED_ROCE_QP_STATE_RESET;
2347 case IB_QPS_INIT:
2348 return QED_ROCE_QP_STATE_INIT;
2349 case IB_QPS_RTR:
2350 return QED_ROCE_QP_STATE_RTR;
2351 case IB_QPS_RTS:
2352 return QED_ROCE_QP_STATE_RTS;
2353 case IB_QPS_SQD:
2354 return QED_ROCE_QP_STATE_SQD;
2355 case IB_QPS_ERR:
2356 return QED_ROCE_QP_STATE_ERR;
2357 default:
2358 return QED_ROCE_QP_STATE_ERR;
2359 }
2360}
2361
cecbcddf
RA
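/* Editor's summary (added, not in the original source): the nested switch
 * below accepts only the following transitions and returns -EINVAL for
 * anything else:
 * RESET -> INIT
 * INIT -> RTR | ERR (the RTR move also rings the RQ doorbell for RoCE)
 * RTR -> RTS | ERR
 * RTS -> SQD | ERR
 * SQD -> RTS | ERR
 * ERR -> RESET (only when both the SQ and the RQ are already empty)
 */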
2362static int qedr_update_qp_state(struct qedr_dev *dev,
2363 struct qedr_qp *qp,
caf61b1b 2364 enum qed_roce_qp_state cur_state,
cecbcddf
RA
2365 enum qed_roce_qp_state new_state)
2366{
2367 int status = 0;
2368
caf61b1b 2369 if (new_state == cur_state)
865cea40 2370 return 0;
cecbcddf 2371
caf61b1b 2372 switch (cur_state) {
cecbcddf
RA
2373 case QED_ROCE_QP_STATE_RESET:
2374 switch (new_state) {
2375 case QED_ROCE_QP_STATE_INIT:
cecbcddf
RA
2376 break;
2377 default:
2378 status = -EINVAL;
2379 break;
790b57f6 2380 }
cecbcddf
RA
2381 break;
2382 case QED_ROCE_QP_STATE_INIT:
2383 switch (new_state) {
2384 case QED_ROCE_QP_STATE_RTR:
2385 /* Update doorbell (in case post_recv was
2386 * done before moving to RTR)
2387 */
f5b1b177
KM
2388
2389 if (rdma_protocol_roce(&dev->ibdev, 1)) {
f5b1b177 2390 writel(qp->rq.db_data.raw, qp->rq.db);
f5b1b177 2391 }
cecbcddf
RA
2392 break;
2393 case QED_ROCE_QP_STATE_ERR:
2394 break;
2395 default:
2396 /* Invalid state change. */
2397 status = -EINVAL;
2398 break;
790b57f6 2399 }
cecbcddf
RA
2400 break;
2401 case QED_ROCE_QP_STATE_RTR:
2402 /* RTR->XXX */
2403 switch (new_state) {
2404 case QED_ROCE_QP_STATE_RTS:
2405 break;
2406 case QED_ROCE_QP_STATE_ERR:
2407 break;
2408 default:
2409 /* Invalid state change. */
2410 status = -EINVAL;
2411 break;
790b57f6 2412 }
cecbcddf
RA
2413 break;
2414 case QED_ROCE_QP_STATE_RTS:
2415 /* RTS->XXX */
2416 switch (new_state) {
2417 case QED_ROCE_QP_STATE_SQD:
2418 break;
2419 case QED_ROCE_QP_STATE_ERR:
2420 break;
2421 default:
2422 /* Invalid state change. */
2423 status = -EINVAL;
2424 break;
790b57f6 2425 }
cecbcddf
RA
2426 break;
2427 case QED_ROCE_QP_STATE_SQD:
2428 /* SQD->XXX */
2429 switch (new_state) {
2430 case QED_ROCE_QP_STATE_RTS:
2431 case QED_ROCE_QP_STATE_ERR:
2432 break;
2433 default:
2434 /* Invalid state change. */
2435 status = -EINVAL;
2436 break;
790b57f6 2437 }
cecbcddf
RA
2438 break;
2439 case QED_ROCE_QP_STATE_ERR:
2440 /* ERR->XXX */
2441 switch (new_state) {
2442 case QED_ROCE_QP_STATE_RESET:
933e6dca
RA
2443 if ((qp->rq.prod != qp->rq.cons) ||
2444 (qp->sq.prod != qp->sq.cons)) {
2445 DP_NOTICE(dev,
2446 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2447 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2448 qp->sq.cons);
2449 status = -EINVAL;
2450 }
cecbcddf
RA
2451 break;
2452 default:
2453 status = -EINVAL;
2454 break;
790b57f6 2455 }
cecbcddf
RA
2456 break;
2457 default:
2458 status = -EINVAL;
2459 break;
790b57f6 2460 }
cecbcddf
RA
2461
2462 return status;
2463}
2464
2465int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2466 int attr_mask, struct ib_udata *udata)
2467{
2468 struct qedr_qp *qp = get_qedr_qp(ibqp);
2469 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2470 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
d8966fcd 2471 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
cecbcddf 2472 enum ib_qp_state old_qp_state, new_qp_state;
caf61b1b 2473 enum qed_roce_qp_state cur_state;
cecbcddf
RA
2474 int rc = 0;
2475
2476 DP_DEBUG(dev, QEDR_MSG_QP,
2477 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2478 attr->qp_state);
2479
26e990ba
JG
2480 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2481 return -EOPNOTSUPP;
2482
cecbcddf
RA
2483 old_qp_state = qedr_get_ibqp_state(qp->state);
2484 if (attr_mask & IB_QP_STATE)
2485 new_qp_state = attr->qp_state;
2486 else
2487 new_qp_state = old_qp_state;
2488
f5b1b177
KM
2489 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
d31131bb 2491 ibqp->qp_type, attr_mask)) {
f5b1b177
KM
2492 DP_ERR(dev,
2493 "modify qp: invalid attribute mask=0x%x specified for\n"
2494 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495 attr_mask, qp->qp_id, ibqp->qp_type,
2496 old_qp_state, new_qp_state);
2497 rc = -EINVAL;
2498 goto err;
2499 }
cecbcddf
RA
2500 }
2501
2502 /* Translate the masks... */
2503 if (attr_mask & IB_QP_STATE) {
2504 SET_FIELD(qp_params.modify_flags,
2505 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2507 }
2508
2509 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510 qp_params.sqd_async = true;
2511
2512 if (attr_mask & IB_QP_PKEY_INDEX) {
2513 SET_FIELD(qp_params.modify_flags,
2514 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2516 rc = -EINVAL;
2517 goto err;
2518 }
2519
2520 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2521 }
2522
2523 if (attr_mask & IB_QP_QKEY)
2524 qp->qkey = attr->qkey;
2525
2526 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527 SET_FIELD(qp_params.modify_flags,
2528 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530 IB_ACCESS_REMOTE_READ;
2531 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532 IB_ACCESS_REMOTE_WRITE;
2533 qp_params.incoming_atomic_en = attr->qp_access_flags &
2534 IB_ACCESS_REMOTE_ATOMIC;
2535 }
2536
2537 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
425cf5c1
KM
2538 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2539 return -EINVAL;
2540
cecbcddf
RA
2541 if (attr_mask & IB_QP_PATH_MTU) {
2542 if (attr->path_mtu < IB_MTU_256 ||
2543 attr->path_mtu > IB_MTU_4096) {
2544 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2545 rc = -EINVAL;
2546 goto err;
2547 }
2548 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2549 ib_mtu_enum_to_int(iboe_get_mtu
2550 (dev->ndev->mtu)));
2551 }
2552
2553 if (!qp->mtu) {
2554 qp->mtu =
2555 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2557 }
2558
2559 SET_FIELD(qp_params.modify_flags,
2560 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2561
d8966fcd
DC
2562 qp_params.traffic_class_tos = grh->traffic_class;
2563 qp_params.flow_label = grh->flow_label;
2564 qp_params.hop_limit_ttl = grh->hop_limit;
cecbcddf 2565
d8966fcd 2566 qp->sgid_idx = grh->sgid_index;
cecbcddf
RA
2567
2568 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2569 if (rc) {
2570 DP_ERR(dev,
2571 "modify qp: problems with GID index %d (rc=%d)\n",
d8966fcd 2572 grh->sgid_index, rc);
cecbcddf
RA
2573 return rc;
2574 }
2575
2576 rc = qedr_get_dmac(dev, &attr->ah_attr,
2577 qp_params.remote_mac_addr);
2578 if (rc)
2579 return rc;
2580
2581 qp_params.use_local_mac = true;
2582 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2583
2584 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591 qp_params.remote_mac_addr);
cecbcddf
RA
2592
2593 qp_params.mtu = qp->mtu;
2594 qp_params.lb_indication = false;
2595 }
2596
2597 if (!qp_params.mtu) {
2598 /* Stay with current MTU */
2599 if (qp->mtu)
2600 qp_params.mtu = qp->mtu;
2601 else
2602 qp_params.mtu =
2603 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2604 }
2605
2606 if (attr_mask & IB_QP_TIMEOUT) {
2607 SET_FIELD(qp_params.modify_flags,
2608 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2609
c3594f22
KM
2610 /* The received timeout value is an exponent used like this:
2611 * "12.7.34 LOCAL ACK TIMEOUT
2612 * Value representing the transport (ACK) timeout for use by
2613 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2614 * The FW expects timeout in msec so we need to divide the usec
2615 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617 * The value of zero means infinite so we use a 'max_t' to make
2618 * sure that sub 1 msec values will be configured as 1 msec.
2619 */
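/* Editor's example (added, not in the original source): attr->timeout == 14
 * means 4.096 * 2^14 usec (~67 msec) per the spec; the formula below gives
 * 1 << (14 - 8) = 64 msec. Any nonzero timeout of 8 or less ends up as
 * 1 msec.
 */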
2620 if (attr->timeout)
2621 qp_params.ack_timeout =
2622 1 << max_t(int, attr->timeout - 8, 0);
2623 else
cecbcddf 2624 qp_params.ack_timeout = 0;
cecbcddf 2625 }
c3594f22 2626
cecbcddf
RA
2627 if (attr_mask & IB_QP_RETRY_CNT) {
2628 SET_FIELD(qp_params.modify_flags,
2629 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2630 qp_params.retry_cnt = attr->retry_cnt;
2631 }
2632
2633 if (attr_mask & IB_QP_RNR_RETRY) {
2634 SET_FIELD(qp_params.modify_flags,
2635 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2636 qp_params.rnr_retry_cnt = attr->rnr_retry;
2637 }
2638
2639 if (attr_mask & IB_QP_RQ_PSN) {
2640 SET_FIELD(qp_params.modify_flags,
2641 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2642 qp_params.rq_psn = attr->rq_psn;
2643 qp->rq_psn = attr->rq_psn;
2644 }
2645
2646 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2647 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2648 rc = -EINVAL;
2649 DP_ERR(dev,
2650 "unsupported max_rd_atomic=%d, supported=%d\n",
2651 attr->max_rd_atomic,
2652 dev->attr.max_qp_req_rd_atomic_resc);
2653 goto err;
2654 }
2655
2656 SET_FIELD(qp_params.modify_flags,
2657 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2658 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2659 }
2660
2661 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2662 SET_FIELD(qp_params.modify_flags,
2663 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2664 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2665 }
2666
2667 if (attr_mask & IB_QP_SQ_PSN) {
2668 SET_FIELD(qp_params.modify_flags,
2669 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2670 qp_params.sq_psn = attr->sq_psn;
2671 qp->sq_psn = attr->sq_psn;
2672 }
2673
2674 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2675 if (attr->max_dest_rd_atomic >
2676 dev->attr.max_qp_resp_rd_atomic_resc) {
2677 DP_ERR(dev,
2678 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2679 attr->max_dest_rd_atomic,
2680 dev->attr.max_qp_resp_rd_atomic_resc);
2681
2682 rc = -EINVAL;
2683 goto err;
2684 }
2685
2686 SET_FIELD(qp_params.modify_flags,
2687 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2688 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2689 }
2690
2691 if (attr_mask & IB_QP_DEST_QPN) {
2692 SET_FIELD(qp_params.modify_flags,
2693 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2694
2695 qp_params.dest_qp = attr->dest_qp_num;
2696 qp->dest_qp_num = attr->dest_qp_num;
2697 }
2698
caf61b1b
KM
2699 cur_state = qp->state;
2700
2701 /* Update the QP state before the actual ramrod to prevent a race with
2702 * fast path. Modifying the QP state to error will cause the device to
2703 * flush the CQEs, and polling those flushed CQEs would be considered
2704 * a potential issue if the QP isn't already in the error state.
2705 */
2706 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2707 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2708 qp->state = QED_ROCE_QP_STATE_ERR;
2709
cecbcddf
RA
2710 if (qp->qp_type != IB_QPT_GSI)
2711 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2712 qp->qed_qp, &qp_params);
2713
2714 if (attr_mask & IB_QP_STATE) {
2715 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
caf61b1b
KM
2716 rc = qedr_update_qp_state(dev, qp, cur_state,
2717 qp_params.new_state);
cecbcddf
RA
2718 qp->state = qp_params.new_state;
2719 }
2720
2721err:
2722 return rc;
2723}
2724
2725static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2726{
2727 int ib_qp_acc_flags = 0;
2728
2729 if (params->incoming_rdma_write_en)
2730 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2731 if (params->incoming_rdma_read_en)
2732 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2733 if (params->incoming_atomic_en)
2734 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2735 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2736 return ib_qp_acc_flags;
2737}
2738
2739int qedr_query_qp(struct ib_qp *ibqp,
2740 struct ib_qp_attr *qp_attr,
2741 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2742{
2743 struct qed_rdma_query_qp_out_params params;
2744 struct qedr_qp *qp = get_qedr_qp(ibqp);
2745 struct qedr_dev *dev = qp->dev;
2746 int rc = 0;
2747
2748 memset(&params, 0, sizeof(params));
cecbcddf
RA
2749 memset(qp_attr, 0, sizeof(*qp_attr));
2750 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2751
74e0a362
AP
2752 if (qp->qp_type != IB_QPT_GSI) {
2753 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2754 if (rc)
2755 goto err;
2756 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2757 } else {
2758 qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2759 }
2760
cecbcddf 2761 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
097b6159 2762 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
cecbcddf
RA
2763 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2764 qp_attr->rq_psn = params.rq_psn;
2765 qp_attr->sq_psn = params.sq_psn;
2766 qp_attr->dest_qp_num = params.dest_qp;
2767
2768 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2769
2770 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2771 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2772 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2773 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
fbf58026 2774 qp_attr->cap.max_inline_data = dev->attr.max_inline;
cecbcddf
RA
2775 qp_init_attr->cap = qp_attr->cap;
2776
44c58487 2777 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
d8966fcd
DC
2778 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2779 params.flow_label, qp->sgid_idx,
2780 params.hop_limit_ttl, params.traffic_class_tos);
2781 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2782 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2783 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
cecbcddf
RA
2784 qp_attr->timeout = params.timeout;
2785 qp_attr->rnr_retry = params.rnr_retry;
2786 qp_attr->retry_cnt = params.retry_cnt;
2787 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2788 qp_attr->pkey_index = params.pkey_index;
2789 qp_attr->port_num = 1;
d8966fcd
DC
2790 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2791 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
cecbcddf
RA
2792 qp_attr->alt_pkey_index = 0;
2793 qp_attr->alt_port_num = 0;
2794 qp_attr->alt_timeout = 0;
2795 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2796
2797 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2798 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2799 qp_attr->max_rd_atomic = params.max_rd_atomic;
2800 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2801
2802 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2803 qp_attr->cap.max_inline_data);
2804
2805err:
2806 return rc;
2807}
2808
c4367a26 2809int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
cecbcddf
RA
2810{
2811 struct qedr_qp *qp = get_qedr_qp(ibqp);
2812 struct qedr_dev *dev = qp->dev;
2813 struct ib_qp_attr attr;
2814 int attr_mask = 0;
cecbcddf
RA
2815
2816 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2817 qp, qp->qp_type);
2818
f5b1b177
KM
2819 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2820 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2821 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2822 (qp->state != QED_ROCE_QP_STATE_INIT)) {
b4c2cc48 2823
f5b1b177
KM
2824 attr.qp_state = IB_QPS_ERR;
2825 attr_mask |= IB_QP_STATE;
cecbcddf 2826
f5b1b177
KM
2827 /* Change the QP state to ERROR */
2828 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2829 }
e411e058 2830 } else {
82af6d19
MK
2831 /* If connection establishment has started, the WAIT_FOR_CONNECT
2832 * bit will be on and we need to wait for the establishment
2833 * to complete before destroying the qp.
2834 */
2835 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2836 &qp->iwarp_cm_flags))
2837 wait_for_completion(&qp->iwarp_cm_comp);
2838
2839 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2840 * bit will be on, and we need to wait for the disconnect to
2841 * complete before continuing. We can use the same completion,
2842 * iwarp_cm_comp, since this is the only place that waits for
2843 * this completion and it is sequential. In addition,
2844 * disconnect can't occur before the connection is fully
2845 * established, therefore if WAIT_FOR_DISCONNECT is on it
2846 * means WAIT_FOR_CONNECT is also on and the completion for
2847 * CONNECT already occurred.
2848 */
2849 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2850 &qp->iwarp_cm_flags))
2851 wait_for_completion(&qp->iwarp_cm_comp);
cecbcddf
RA
2852 }
2853
df158561 2854 if (qp->qp_type == IB_QPT_GSI)
04886779 2855 qedr_destroy_gsi_qp(dev);
cecbcddf 2856
82af6d19
MK
2857 /* We need to remove the entry from the xarray before we release the
2858 * qp_id to avoid a race of the qp_id being reallocated and failing
2859 * on xa_insert
2860 */
2861 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2862 xa_erase(&dev->qps, qp->qp_id);
2863
bdeacabd 2864 qedr_free_qp_resources(dev, qp, udata);
cecbcddf 2865
60fab107 2866 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
82af6d19 2867 qedr_iw_qp_rem_ref(&qp->ibqp);
60fab107
PK
2868 wait_for_completion(&qp->qp_rel_comp);
2869 }
82af6d19 2870
cf167e5e 2871 return 0;
cecbcddf 2872}
e0290cce 2873
fa5d010c 2874int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
d3456914 2875 struct ib_udata *udata)
04886779 2876{
d3456914 2877 struct qedr_ah *ah = get_qedr_ah(ibah);
04886779 2878
fa5d010c 2879 rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
04886779 2880
d3456914 2881 return 0;
04886779
RA
2882}
2883
9a9ebf8c 2884int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
04886779
RA
2885{
2886 struct qedr_ah *ah = get_qedr_ah(ibah);
2887
d97099fe 2888 rdma_destroy_ah_attr(&ah->attr);
9a9ebf8c 2889 return 0;
04886779
RA
2890}
2891
e0290cce
RA
2892static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2893{
2894 struct qedr_pbl *pbl, *tmp;
2895
2896 if (info->pbl_table)
2897 list_add_tail(&info->pbl_table->list_entry,
2898 &info->free_pbl_list);
2899
2900 if (!list_empty(&info->inuse_pbl_list))
2901 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2902
2903 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2904 list_del(&pbl->list_entry);
2905 qedr_free_pbl(dev, &info->pbl_info, pbl);
2906 }
2907}
2908
2909static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2910 size_t page_list_len, bool two_layered)
2911{
2912 struct qedr_pbl *tmp;
2913 int rc;
2914
2915 INIT_LIST_HEAD(&info->free_pbl_list);
2916 INIT_LIST_HEAD(&info->inuse_pbl_list);
2917
2918 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2919 page_list_len, two_layered);
2920 if (rc)
2921 goto done;
2922
2923 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
4cd33aaf
CJ
2924 if (IS_ERR(info->pbl_table)) {
2925 rc = PTR_ERR(info->pbl_table);
e0290cce
RA
2926 goto done;
2927 }
2928
2929 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2930 &info->pbl_table->pa);
2931
2932 /* In the usual case we use 2 PBLs, so we add one to the free
2933 * list and allocate another one.
2934 */
2935 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
4cd33aaf 2936 if (IS_ERR(tmp)) {
e0290cce
RA
2937 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2938 goto done;
2939 }
2940
2941 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2942
2943 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2944
2945done:
2946 if (rc)
2947 free_mr_info(dev, info);
2948
2949 return rc;
2950}
2951
2952struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2953 u64 usr_addr, int acc, struct ib_udata *udata)
2954{
2955 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2956 struct qedr_mr *mr;
2957 struct qedr_pd *pd;
2958 int rc = -ENOMEM;
2959
2960 pd = get_qedr_pd(ibpd);
2961 DP_DEBUG(dev, QEDR_MSG_MR,
2962 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2963 pd->pd_id, start, len, usr_addr, acc);
2964
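/* Editor's note (added, not in the original source): the check below
 * enforces the IB rule that remote write access also requires local write
 * access.
 */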
2965 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2966 return ERR_PTR(-EINVAL);
2967
2968 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2969 if (!mr)
2970 return ERR_PTR(rc);
2971
2972 mr->type = QEDR_MR_USER;
2973
c320e527 2974 mr->umem = ib_umem_get(ibpd->device, start, len, acc);
e0290cce
RA
2975 if (IS_ERR(mr->umem)) {
2976 rc = -EFAULT;
2977 goto err0;
2978 }
2979
901bca71
JG
2980 rc = init_mr_info(dev, &mr->info,
2981 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
e0290cce
RA
2982 if (rc)
2983 goto err1;
2984
2985 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
95ad233f 2986 &mr->info.pbl_info, PAGE_SHIFT);
e0290cce
RA
2987
2988 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2989 if (rc) {
0050a576
PK
2990 if (rc == -EINVAL)
2991 DP_ERR(dev, "Out of MR resources\n");
2992 else
2993 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2994
e0290cce
RA
2995 goto err1;
2996 }
2997
2998 /* Index only, 18 bit long, lkey = itid << 8 | key */
2999 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3000 mr->hw_mr.key = 0;
3001 mr->hw_mr.pd = pd->pd_id;
3002 mr->hw_mr.local_read = 1;
3003 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3004 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3005 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3006 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3007 mr->hw_mr.mw_bind = false;
3008 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3009 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3010 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
95ad233f 3011 mr->hw_mr.page_size_log = PAGE_SHIFT;
e0290cce
RA
3012 mr->hw_mr.length = len;
3013 mr->hw_mr.vaddr = usr_addr;
e0290cce
RA
3014 mr->hw_mr.phy_mr = false;
3015 mr->hw_mr.dma_mr = false;
3016
3017 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3018 if (rc) {
3019 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3020 goto err2;
3021 }
3022
3023 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3024 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3025 mr->hw_mr.remote_atomic)
3026 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3027
3028 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3029 mr->ibmr.lkey);
3030 return &mr->ibmr;
3031
3032err2:
3033 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3034err1:
3035 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3036err0:
3037 kfree(mr);
3038 return ERR_PTR(rc);
3039}
3040
c4367a26 3041int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
e0290cce
RA
3042{
3043 struct qedr_mr *mr = get_qedr_mr(ib_mr);
3044 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3045 int rc = 0;
3046
3047 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3048 if (rc)
3049 return rc;
3050
3051 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3052
24e412c1
MK
3053 if (mr->type != QEDR_MR_DMA)
3054 free_mr_info(dev, &mr->info);
e0290cce
RA
3055
3056 /* it could be user registered memory. */
836a0fbb 3057 ib_umem_release(mr->umem);
e0290cce
RA
3058
3059 kfree(mr);
3060
3061 return rc;
3062}
3063
27a4b1a6
RA
3064static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3065 int max_page_list_len)
e0290cce
RA
3066{
3067 struct qedr_pd *pd = get_qedr_pd(ibpd);
3068 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3069 struct qedr_mr *mr;
3070 int rc = -ENOMEM;
3071
3072 DP_DEBUG(dev, QEDR_MSG_MR,
3073 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3074 max_page_list_len);
3075
3076 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3077 if (!mr)
3078 return ERR_PTR(rc);
3079
3080 mr->dev = dev;
3081 mr->type = QEDR_MR_FRMR;
3082
3083 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3084 if (rc)
3085 goto err0;
3086
3087 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3088 if (rc) {
0050a576
PK
3089 if (rc == -EINVAL)
3090 DP_ERR(dev, "Out of MR resources\n");
3091 else
3092 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3093
e0290cce
RA
3094 goto err0;
3095 }
3096
3097 /* Index only, 18 bit long, lkey = itid << 8 | key */
3098 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3099 mr->hw_mr.key = 0;
3100 mr->hw_mr.pd = pd->pd_id;
3101 mr->hw_mr.local_read = 1;
3102 mr->hw_mr.local_write = 0;
3103 mr->hw_mr.remote_read = 0;
3104 mr->hw_mr.remote_write = 0;
3105 mr->hw_mr.remote_atomic = 0;
3106 mr->hw_mr.mw_bind = false;
3107 mr->hw_mr.pbl_ptr = 0;
3108 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3109 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
e0290cce
RA
3110 mr->hw_mr.length = 0;
3111 mr->hw_mr.vaddr = 0;
e0290cce
RA
3112 mr->hw_mr.phy_mr = true;
3113 mr->hw_mr.dma_mr = false;
3114
3115 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3116 if (rc) {
3117 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3118 goto err1;
3119 }
3120
3121 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3122 mr->ibmr.rkey = mr->ibmr.lkey;
3123
3124 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3125 return mr;
3126
3127err1:
3128 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3129err0:
3130 kfree(mr);
3131 return ERR_PTR(rc);
3132}
3133
c4367a26 3134struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
42a3b153 3135 u32 max_num_sg)
e0290cce 3136{
e0290cce
RA
3137 struct qedr_mr *mr;
3138
3139 if (mr_type != IB_MR_TYPE_MEM_REG)
3140 return ERR_PTR(-EINVAL);
3141
3142 mr = __qedr_alloc_mr(ibpd, max_num_sg);
3143
3144 if (IS_ERR(mr))
3145 return ERR_PTR(-EINVAL);
3146
e0290cce
RA
3147 return &mr->ibmr;
3148}
3149
3150static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3151{
3152 struct qedr_mr *mr = get_qedr_mr(ibmr);
3153 struct qedr_pbl *pbl_table;
3154 struct regpair *pbe;
3155 u32 pbes_in_page;
3156
3157 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
ffab8c89 3158 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
e0290cce
RA
3159 return -ENOMEM;
3160 }
3161
3162 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3163 mr->npages, addr);
3164
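/* Editor's example (added, not in the original source): with a 4 KiB PBL
 * page, pbes_in_page is 4096 / 8 = 512, so npages == 1000 selects
 * pbl_table[1] and PBE 488 within it.
 */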
3165 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3166 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3167 pbe = (struct regpair *)pbl_table->va;
3168 pbe += mr->npages % pbes_in_page;
3169 pbe->lo = cpu_to_le32((u32)addr);
3170 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3171
3172 mr->npages++;
3173
3174 return 0;
3175}
3176
3177static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3178{
3179 int work = info->completed - info->completed_handled - 1;
3180
3181 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3182 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3183 struct qedr_pbl *pbl;
3184
3185 /* Free all the page lists that can be freed
3186 * (all the ones that were invalidated), under the assumption
3187 * that if an FMR completed successfully, any invalidate
3188 * operation issued before it has also completed.
3189 */
3190 pbl = list_first_entry(&info->inuse_pbl_list,
3191 struct qedr_pbl, list_entry);
aafec388 3192 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
e0290cce
RA
3193 info->completed_handled++;
3194 }
3195}
3196
3197int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3198 int sg_nents, unsigned int *sg_offset)
3199{
3200 struct qedr_mr *mr = get_qedr_mr(ibmr);
3201
3202 mr->npages = 0;
3203
3204 handle_completed_mrs(mr->dev, &mr->info);
3205 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3206}
3207
3208struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3209{
3210 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3211 struct qedr_pd *pd = get_qedr_pd(ibpd);
3212 struct qedr_mr *mr;
3213 int rc;
3214
3215 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3216 if (!mr)
3217 return ERR_PTR(-ENOMEM);
3218
3219 mr->type = QEDR_MR_DMA;
3220
3221 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3222 if (rc) {
0050a576
PK
3223 if (rc == -EINVAL)
3224 DP_ERR(dev, "Out of MR resources\n");
3225 else
3226 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3227
e0290cce
RA
3228 goto err1;
3229 }
3230
3231 /* index only, 18 bit long, lkey = itid << 8 | key */
3232 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3233 mr->hw_mr.pd = pd->pd_id;
3234 mr->hw_mr.local_read = 1;
3235 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3236 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3237 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3238 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3239 mr->hw_mr.dma_mr = true;
3240
3241 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3242 if (rc) {
3243 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3244 goto err2;
3245 }
3246
3247 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3248 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3249 mr->hw_mr.remote_atomic)
3250 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3251
3252 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3253 return &mr->ibmr;
3254
3255err2:
3256 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3257err1:
3258 kfree(mr);
3259 return ERR_PTR(rc);
3260}
afa0e13b
RA
3261
3262static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3263{
3264 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3265}
3266
3267static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3268{
3269 int i, len = 0;
3270
3271 for (i = 0; i < num_sge; i++)
3272 len += sg_list[i].length;
3273
3274 return len;
3275}
3276
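/* Editor's note (added, not in the original source): the
 * cpu_to_be64(cpu_to_le64()) pair below reduces to an unconditional 64-bit
 * byte swap on both little- and big-endian hosts; it is applied to inline
 * SQ payload segments, which apparently must sit in the WQE in the opposite
 * byte order from the rest of the little-endian descriptor.
 */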
3277static void swap_wqe_data64(u64 *p)
3278{
3279 int i;
3280
3281 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3282 *p = cpu_to_be64(cpu_to_le64(*p));
3283}
3284
3285static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3286 struct qedr_qp *qp, u8 *wqe_size,
d34ac5cd
BVA
3287 const struct ib_send_wr *wr,
3288 const struct ib_send_wr **bad_wr,
3289 u8 *bits, u8 bit)
afa0e13b
RA
3290{
3291 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3292 char *seg_prt, *wqe;
3293 int i, seg_siz;
3294
3295 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3296 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3297 *bad_wr = wr;
3298 return 0;
3299 }
3300
3301 if (!data_size)
3302 return data_size;
3303
3304 *bits |= bit;
3305
3306 seg_prt = NULL;
3307 wqe = NULL;
3308 seg_siz = 0;
3309
3310 /* Copy data inline */
3311 for (i = 0; i < wr->num_sge; i++) {
3312 u32 len = wr->sg_list[i].length;
3313 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3314
3315 while (len > 0) {
3316 u32 cur;
3317
3318 /* New segment required */
3319 if (!seg_siz) {
3320 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3321 seg_prt = wqe;
3322 seg_siz = sizeof(struct rdma_sq_common_wqe);
3323 (*wqe_size)++;
3324 }
3325
3326 /* Calculate currently allowed length */
3327 cur = min_t(u32, len, seg_siz);
3328 memcpy(seg_prt, src, cur);
3329
3330 /* Update segment variables */
3331 seg_prt += cur;
3332 seg_siz -= cur;
3333
3334 /* Update sge variables */
3335 src += cur;
3336 len -= cur;
3337
3338 /* Swap fully-completed segments */
3339 if (!seg_siz)
3340 swap_wqe_data64((u64 *)wqe);
3341 }
3342 }
3343
3344 /* swap the last, partially completed segment */
3345 if (seg_siz)
3346 swap_wqe_data64((u64 *)wqe);
3347
3348 return data_size;
3349}
3350
3351#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3352 do { \
3353 DMA_REGPAIR_LE(sge->addr, vaddr); \
3354 (sge)->length = cpu_to_le32(vlength); \
3355 (sge)->flags = cpu_to_le32(vflags); \
3356 } while (0)
3357
3358#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3359 do { \
3360 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3361 (hdr)->num_sges = num_sge; \
3362 } while (0)
3363
3364#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3365 do { \
3366 DMA_REGPAIR_LE(sge->addr, vaddr); \
3367 (sge)->length = cpu_to_le32(vlength); \
3368 (sge)->l_key = cpu_to_le32(vlkey); \
3369 } while (0)
3370
3371static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
f696bf6d 3372 const struct ib_send_wr *wr)
afa0e13b
RA
3373{
3374 u32 data_size = 0;
3375 int i;
3376
3377 for (i = 0; i < wr->num_sge; i++) {
3378 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3379
3380 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3381 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3382 sge->length = cpu_to_le32(wr->sg_list[i].length);
3383 data_size += wr->sg_list[i].length;
3384 }
3385
3386 if (wqe_size)
3387 *wqe_size += wr->num_sge;
3388
3389 return data_size;
3390}
3391
3392static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3393 struct qedr_qp *qp,
3394 struct rdma_sq_rdma_wqe_1st *rwqe,
3395 struct rdma_sq_rdma_wqe_2nd *rwqe2,
d34ac5cd
BVA
3396 const struct ib_send_wr *wr,
3397 const struct ib_send_wr **bad_wr)
afa0e13b
RA
3398{
3399 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3400 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3401
8b0cabc6
AR
3402 if (wr->send_flags & IB_SEND_INLINE &&
3403 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3404 wr->opcode == IB_WR_RDMA_WRITE)) {
afa0e13b
RA
3405 u8 flags = 0;
3406
3407 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3408 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3409 bad_wr, &rwqe->flags, flags);
3410 }
3411
3412 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3413}
3414
3415static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3416 struct qedr_qp *qp,
3417 struct rdma_sq_send_wqe_1st *swqe,
3418 struct rdma_sq_send_wqe_2st *swqe2,
d34ac5cd
BVA
3419 const struct ib_send_wr *wr,
3420 const struct ib_send_wr **bad_wr)
afa0e13b
RA
3421{
3422 memset(swqe2, 0, sizeof(*swqe2));
3423 if (wr->send_flags & IB_SEND_INLINE) {
3424 u8 flags = 0;
3425
3426 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3427 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3428 bad_wr, &swqe->flags, flags);
3429 }
3430
3431 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3432}
3433
3434static int qedr_prepare_reg(struct qedr_qp *qp,
3435 struct rdma_sq_fmr_wqe_1st *fwqe1,
f696bf6d 3436 const struct ib_reg_wr *wr)
afa0e13b
RA
3437{
3438 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3439 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3440
3441 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3442 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3443 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3444 fwqe1->l_key = wr->key;
3445
08c4cf51
AR
3446 fwqe2->access_ctrl = 0;
3447
afa0e13b
RA
3448 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3449 !!(wr->access & IB_ACCESS_REMOTE_READ));
3450 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3451 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3452 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3453 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3454 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3455 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3456 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3457 fwqe2->fmr_ctrl = 0;
3458
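/* Editor's note (added, not in the original source): the page size log is
 * encoded relative to 4 KiB (2^12), so a 4 KiB MR page size becomes 0 and
 * a 2 MiB page size becomes 9.
 */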
3459 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3460 ilog2(mr->ibmr.page_size) - 12);
3461
3462 fwqe2->length_hi = 0;
3463 fwqe2->length_lo = mr->ibmr.length;
3464 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3465 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3466
3467 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3468
3469 return 0;
3470}
3471
27a4b1a6 3472static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
afa0e13b
RA
3473{
3474 switch (opcode) {
3475 case IB_WR_RDMA_WRITE:
3476 case IB_WR_RDMA_WRITE_WITH_IMM:
3477 return IB_WC_RDMA_WRITE;
3478 case IB_WR_SEND_WITH_IMM:
3479 case IB_WR_SEND:
3480 case IB_WR_SEND_WITH_INV:
3481 return IB_WC_SEND;
3482 case IB_WR_RDMA_READ:
fb1a22be 3483 case IB_WR_RDMA_READ_WITH_INV:
afa0e13b
RA
3484 return IB_WC_RDMA_READ;
3485 case IB_WR_ATOMIC_CMP_AND_SWP:
3486 return IB_WC_COMP_SWAP;
3487 case IB_WR_ATOMIC_FETCH_AND_ADD:
3488 return IB_WC_FETCH_ADD;
3489 case IB_WR_REG_MR:
3490 return IB_WC_REG_MR;
3491 case IB_WR_LOCAL_INV:
3492 return IB_WC_LOCAL_INV;
3493 default:
3494 return IB_WC_SEND;
3495 }
3496}
3497
f696bf6d
BVA
3498static inline bool qedr_can_post_send(struct qedr_qp *qp,
3499 const struct ib_send_wr *wr)
afa0e13b
RA
3500{
3501 int wq_is_full, err_wr, pbl_is_full;
3502 struct qedr_dev *dev = qp->dev;
3503
3504 /* prevent SQ overflow and/or processing of a bad WR */
3505 err_wr = wr->num_sge > qp->sq.max_sges;
3506 wq_is_full = qedr_wq_is_full(&qp->sq);
3507 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3508 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3509 if (wq_is_full || err_wr || pbl_is_full) {
3510 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3511 DP_ERR(dev,
3512 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3513 qp);
3514 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3515 }
3516
3517 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3518 DP_ERR(dev,
3519 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3520 qp);
3521 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3522 }
3523
3524 if (pbl_is_full &&
3525 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3526 DP_ERR(dev,
3527 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3528 qp);
3529 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3530 }
3531 return false;
3532 }
3533 return true;
3534}
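/* Editor's note: err_bitmap latches each error class so the corresponding
 * message is logged only once per QP; posting still fails (the caller returns
 * -ENOMEM) on every subsequent attempt while the condition persists.
 */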
3535
d34ac5cd
BVA
3536static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3537 const struct ib_send_wr **bad_wr)
afa0e13b
RA
3538{
3539 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3540 struct qedr_qp *qp = get_qedr_qp(ibqp);
3541 struct rdma_sq_atomic_wqe_1st *awqe1;
3542 struct rdma_sq_atomic_wqe_2nd *awqe2;
3543 struct rdma_sq_atomic_wqe_3rd *awqe3;
3544 struct rdma_sq_send_wqe_2st *swqe2;
3545 struct rdma_sq_local_inv_wqe *iwqe;
3546 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3547 struct rdma_sq_send_wqe_1st *swqe;
3548 struct rdma_sq_rdma_wqe_1st *rwqe;
3549 struct rdma_sq_fmr_wqe_1st *fwqe1;
3550 struct rdma_sq_common_wqe *wqe;
3551 u32 length;
3552 int rc = 0;
3553 bool comp;
3554
3555 if (!qedr_can_post_send(qp, wr)) {
3556 *bad_wr = wr;
3557 return -ENOMEM;
3558 }
3559
3560 wqe = qed_chain_produce(&qp->sq.pbl);
3561 qp->wqe_wr_id[qp->sq.prod].signaled =
3562 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3563
3564 wqe->flags = 0;
3565 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3566 !!(wr->send_flags & IB_SEND_SOLICITED));
3567 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3568 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3569 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3570 !!(wr->send_flags & IB_SEND_FENCE));
3571 wqe->prev_wqe_size = qp->prev_wqe_size;
3572
3573 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3574
3575 switch (wr->opcode) {
3576 case IB_WR_SEND_WITH_IMM:
551e1c67
KM
3577 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3578 rc = -EINVAL;
3579 *bad_wr = wr;
3580 break;
3581 }
afa0e13b
RA
3582 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3583 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3584 swqe->wqe_size = 2;
3585 swqe2 = qed_chain_produce(&qp->sq.pbl);
3586
7bed7ebc 3587 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
afa0e13b
RA
3588 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3589 wr, bad_wr);
3590 swqe->length = cpu_to_le32(length);
3591 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3592 qp->prev_wqe_size = swqe->wqe_size;
3593 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3594 break;
3595 case IB_WR_SEND:
3596 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3597 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3598
3599 swqe->wqe_size = 2;
3600 swqe2 = qed_chain_produce(&qp->sq.pbl);
3601 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3602 wr, bad_wr);
3603 swqe->length = cpu_to_le32(length);
3604 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3605 qp->prev_wqe_size = swqe->wqe_size;
3606 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3607 break;
3608 case IB_WR_SEND_WITH_INV:
3609 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3610 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3611 swqe2 = qed_chain_produce(&qp->sq.pbl);
3612 swqe->wqe_size = 2;
3613 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3614 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3615 wr, bad_wr);
3616 swqe->length = cpu_to_le32(length);
3617 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3618 qp->prev_wqe_size = swqe->wqe_size;
3619 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3620 break;
3621
3622 case IB_WR_RDMA_WRITE_WITH_IMM:
551e1c67
KM
3623 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3624 rc = -EINVAL;
3625 *bad_wr = wr;
3626 break;
3627 }
afa0e13b
RA
3628 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3629 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3630
3631 rwqe->wqe_size = 2;
3632 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3633 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3634 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3635 wr, bad_wr);
3636 rwqe->length = cpu_to_le32(length);
3637 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3638 qp->prev_wqe_size = rwqe->wqe_size;
3639 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3640 break;
3641 case IB_WR_RDMA_WRITE:
3642 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3643 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3644
3645 rwqe->wqe_size = 2;
3646 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3647 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3648 wr, bad_wr);
3649 rwqe->length = cpu_to_le32(length);
3650 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3651 qp->prev_wqe_size = rwqe->wqe_size;
3652 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3653 break;
3654 case IB_WR_RDMA_READ_WITH_INV:
fb1a22be 3655 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
df561f66 3656			fallthrough;	/* handled identically to RDMA READ */
afa0e13b
RA
3657
3658 case IB_WR_RDMA_READ:
3659 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3660 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3661
3662 rwqe->wqe_size = 2;
3663 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3664 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3665 wr, bad_wr);
3666 rwqe->length = cpu_to_le32(length);
3667 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3668 qp->prev_wqe_size = rwqe->wqe_size;
3669 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3670 break;
3671
3672 case IB_WR_ATOMIC_CMP_AND_SWP:
3673 case IB_WR_ATOMIC_FETCH_AND_ADD:
3674 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3675 awqe1->wqe_size = 4;
3676
3677 awqe2 = qed_chain_produce(&qp->sq.pbl);
3678 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3679 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3680
3681 awqe3 = qed_chain_produce(&qp->sq.pbl);
3682
3683 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3684 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3685 DMA_REGPAIR_LE(awqe3->swap_data,
3686 atomic_wr(wr)->compare_add);
3687 } else {
3688 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3689 DMA_REGPAIR_LE(awqe3->swap_data,
3690 atomic_wr(wr)->swap);
3691 DMA_REGPAIR_LE(awqe3->cmp_data,
3692 atomic_wr(wr)->compare_add);
3693 }
3694
3695 qedr_prepare_sq_sges(qp, NULL, wr);
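		/* Editor's note: assuming the single SGE used by atomic WRs,
		 * an atomic operation occupies four SQ chain elements: the
		 * three atomic WQE segments produced above plus one SGE
		 * element added by qedr_prepare_sq_sges(), matching the
		 * wqe_size = 4 set on the first segment.
		 */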
3696
3697 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3698 qp->prev_wqe_size = awqe1->wqe_size;
3699 break;
3700
3701 case IB_WR_LOCAL_INV:
3702 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3703 iwqe->wqe_size = 1;
3704
3705 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3706 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3707 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3708 qp->prev_wqe_size = iwqe->wqe_size;
3709 break;
3710 case IB_WR_REG_MR:
3711 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3712 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3713 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3714 fwqe1->wqe_size = 2;
3715
3716 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3717 if (rc) {
3718 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3719 *bad_wr = wr;
3720 break;
3721 }
3722
3723 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3724 qp->prev_wqe_size = fwqe1->wqe_size;
3725 break;
3726 default:
3727 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3728 rc = -EINVAL;
3729 *bad_wr = wr;
3730 break;
3731 }
3732
3733 if (*bad_wr) {
3734 u16 value;
3735
3736 /* Restore prod to its position before
3737 * this WR was processed
3738 */
3739 value = le16_to_cpu(qp->sq.db_data.data.value);
3740 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3741
3742 /* Restore prev_wqe_size */
3743 qp->prev_wqe_size = wqe->prev_wqe_size;
3744 rc = -EINVAL;
3745 DP_ERR(dev, "POST SEND FAILED\n");
3746 }
3747
3748 return rc;
3749}
3750
d34ac5cd
BVA
3751int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3752 const struct ib_send_wr **bad_wr)
afa0e13b
RA
3753{
3754 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3755 struct qedr_qp *qp = get_qedr_qp(ibqp);
3756 unsigned long flags;
3757 int rc = 0;
3758
3759 *bad_wr = NULL;
3760
04886779
RA
3761 if (qp->qp_type == IB_QPT_GSI)
3762 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3763
afa0e13b
RA
3764 spin_lock_irqsave(&qp->q_lock, flags);
3765
f5b1b177
KM
3766 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3767 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3768 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3769 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3770 spin_unlock_irqrestore(&qp->q_lock, flags);
3771 *bad_wr = wr;
3772 DP_DEBUG(dev, QEDR_MSG_CQ,
3773 "QP in wrong state! QP icid=0x%x state %d\n",
3774 qp->icid, qp->state);
3775 return -EINVAL;
3776 }
afa0e13b
RA
3777 }
3778
afa0e13b
RA
3779 while (wr) {
3780 rc = __qedr_post_send(ibqp, wr, bad_wr);
3781 if (rc)
3782 break;
3783
3784 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3785
3786 qedr_inc_sw_prod(&qp->sq);
3787
3788 qp->sq.db_data.data.value++;
3789
3790 wr = wr->next;
3791 }
3792
3793 /* Trigger doorbell
3794 * If there was a failure in the first WR then it will be triggered in
 3795	 * vain. However, this is not harmful (as long as the producer value is
3796 * unchanged). For performance reasons we avoid checking for this
3797 * redundant doorbell.
09c4854f
KM
3798 *
3799 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3800 * soon as we give the doorbell, we could get a completion
3801 * for this wr, therefore we need to make sure that the
3802 * memory is updated before giving the doorbell.
3803 * During qedr_poll_cq, rmb is called before accessing the
3804 * cqe. This covers for the smp_rmb as well.
afa0e13b 3805 */
09c4854f 3806 smp_wmb();
afa0e13b
RA
3807 writel(qp->sq.db_data.raw, qp->sq.db);
3808
afa0e13b
RA
3809 spin_unlock_irqrestore(&qp->q_lock, flags);
3810
3811 return rc;
3812}
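/* Usage sketch (editor's illustration, not part of the driver): consumers do
 * not call qedr_post_send() directly; they post through the core verbs layer,
 * which dispatches here via the device's ib_device_ops. A minimal RDMA write
 * post from a kernel ULP might look like the following, where sge, remote_va
 * and remote_rkey are assumed to have been set up beforehand:
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_va,
 *		.rkey        = remote_rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(qp, &wr.wr, &bad_wr);
 */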
3813
3491c9e7
YB
3814static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3815{
3816 u32 used;
3817
 3818	/* Calculate the number of elements used from the producer and
 3819	 * consumer counts, and subtract it from the maximum number of
 3820	 * work requests supported to get the number of elements left.
 3821	 */
acca72e2 3822 used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
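	/* Editor's note: wr_prod_cnt and wr_cons_cnt are free-running counters
	 * that are never reset, so the unsigned subtraction above stays
	 * correct across 32-bit wrap-around.
	 */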
3491c9e7
YB
3823
3824 return hw_srq->max_wr - used;
3825}
3826
3827int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3828 const struct ib_recv_wr **bad_wr)
3829{
3830 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3831 struct qedr_srq_hwq_info *hw_srq;
3832 struct qedr_dev *dev = srq->dev;
3833 struct qed_chain *pbl;
3834 unsigned long flags;
3835 int status = 0;
3836 u32 num_sge;
3491c9e7
YB
3837
3838 spin_lock_irqsave(&srq->lock, flags);
3839
3840 hw_srq = &srq->hw_srq;
3841 pbl = &srq->hw_srq.pbl;
3842 while (wr) {
3843 struct rdma_srq_wqe_header *hdr;
3844 int i;
3845
3846 if (!qedr_srq_elem_left(hw_srq) ||
3847 wr->num_sge > srq->hw_srq.max_sges) {
3848 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
acca72e2
YB
3849 hw_srq->wr_prod_cnt,
3850 atomic_read(&hw_srq->wr_cons_cnt),
3491c9e7
YB
3851 wr->num_sge, srq->hw_srq.max_sges);
3852 status = -ENOMEM;
3853 *bad_wr = wr;
3854 break;
3855 }
3856
3857 hdr = qed_chain_produce(pbl);
3858 num_sge = wr->num_sge;
3859 /* Set number of sge and work request id in header */
3860 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3861
3862 srq->hw_srq.wr_prod_cnt++;
3863 hw_srq->wqe_prod++;
3864 hw_srq->sge_prod++;
3865
3866 DP_DEBUG(dev, QEDR_MSG_SRQ,
3867 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3868 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3869
3870 for (i = 0; i < wr->num_sge; i++) {
3871 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3872
3873 /* Set SGE length, lkey and address */
3874 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3875 wr->sg_list[i].length, wr->sg_list[i].lkey);
3876
3877 DP_DEBUG(dev, QEDR_MSG_SRQ,
3878 "[%d]: len %d key %x addr %x:%x\n",
3879 i, srq_sge->length, srq_sge->l_key,
3880 srq_sge->addr.hi, srq_sge->addr.lo);
3881 hw_srq->sge_prod++;
3882 }
3883
acca72e2 3884 /* Update WQE and SGE information before
3491c9e7
YB
3885 * updating producer.
3886 */
acca72e2 3887 dma_wmb();
3491c9e7
YB
3888
 3889		/* The SRQ producer is 8 bytes: the SGE producer index is updated
 3890		 * in the first 4 bytes and the WQE producer index in the
 3891		 * next 4 bytes.
3892 */
f45271ac 3893 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
acca72e2
YB
3894 /* Make sure sge producer is updated first */
3895 dma_wmb();
f45271ac 3896 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3491c9e7 3897
3491c9e7
YB
3898 wr = wr->next;
3899 }
3900
3901 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3902 qed_chain_get_elem_left(pbl));
3903 spin_unlock_irqrestore(&srq->lock, flags);
3904
3905 return status;
3906}
3907
d34ac5cd
BVA
3908int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3909 const struct ib_recv_wr **bad_wr)
afa0e13b
RA
3910{
3911 struct qedr_qp *qp = get_qedr_qp(ibqp);
3912 struct qedr_dev *dev = qp->dev;
3913 unsigned long flags;
3914 int status = 0;
3915
04886779
RA
3916 if (qp->qp_type == IB_QPT_GSI)
3917 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3918
afa0e13b
RA
3919 spin_lock_irqsave(&qp->q_lock, flags);
3920
afa0e13b
RA
3921 while (wr) {
3922 int i;
3923
3924 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3925 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3926 wr->num_sge > qp->rq.max_sges) {
3927 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3928 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3929 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3930 qp->rq.max_sges);
3931 status = -ENOMEM;
3932 *bad_wr = wr;
3933 break;
3934 }
3935 for (i = 0; i < wr->num_sge; i++) {
3936 u32 flags = 0;
3937 struct rdma_rq_sge *rqe =
3938 qed_chain_produce(&qp->rq.pbl);
3939
3940 /* First one must include the number
 3941			 * of SGEs in the list
3942 */
3943 if (!i)
3944 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3945 wr->num_sge);
3946
d52c89f1 3947 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
afa0e13b
RA
3948 wr->sg_list[i].lkey);
3949
3950 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3951 wr->sg_list[i].length, flags);
3952 }
3953
 3954		/* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
 3955		 * so in this case we post one SGE with length zero. This is
 3956		 * because an RDMA write with immediate consumes an RQ element.
3957 */
3958 if (!wr->num_sge) {
3959 u32 flags = 0;
3960 struct rdma_rq_sge *rqe =
3961 qed_chain_produce(&qp->rq.pbl);
3962
3963 /* First one must include the number
 3964			 * of SGEs in the list
3965 */
d52c89f1 3966 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
afa0e13b
RA
3967 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3968
3969 RQ_SGE_SET(rqe, 0, 0, flags);
3970 i = 1;
3971 }
3972
3973 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3974 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3975
3976 qedr_inc_sw_prod(&qp->rq);
3977
09c4854f
KM
3978 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3979 * soon as we give the doorbell, we could get a completion
3980 * for this wr, therefore we need to make sure that the
 3981		 * memory is updated before giving the doorbell.
3982 * During qedr_poll_cq, rmb is called before accessing the
3983 * cqe. This covers for the smp_rmb as well.
3984 */
3985 smp_wmb();
afa0e13b
RA
3986
3987 qp->rq.db_data.data.value++;
3988
3989 writel(qp->rq.db_data.raw, qp->rq.db);
3990
f5b1b177
KM
3991 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3992 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
f5b1b177
KM
3993 }
3994
afa0e13b
RA
3995 wr = wr->next;
3996 }
3997
3998 spin_unlock_irqrestore(&qp->q_lock, flags);
3999
4000 return status;
4001}
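/* Usage sketch (editor's illustration, not part of the driver): receive WRs
 * reach this function through the core verbs wrapper. Assuming dma_addr,
 * buf_len and mr_lkey describe a previously DMA-mapped, registered buffer:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = mr_lkey,
 *	};
 *	struct ib_recv_wr rwr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int rc = ib_post_recv(qp, &rwr, &bad_wr);
 */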
4002
4003static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4004{
4005 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4006
4007 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4008 cq->pbl_toggle;
4009}
4010
4011static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4012{
4013 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4014 struct qedr_qp *qp;
4015
4016 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4017 resp_cqe->qp_handle.lo,
4018 u64);
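	/* Editor's note: the qp_handle hi/lo pair in the CQE is presumably the
	 * host virtual address of the qedr_qp that the driver handed to the FW
	 * when the QP was created, which is why the pointer can be recovered
	 * directly here.
	 */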
4019 return qp;
4020}
4021
4022static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4023{
4024 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4025
4026 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4027}
4028
4029/* Return latest CQE (needs processing) */
4030static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4031{
4032 return cq->latest_cqe;
4033}
4034
4035/* For FMR (REG_MR) work requests we need to increment the completed counter
 4036 * used by the FMR algorithm to determine whether a PBL can be freed or not.
 4037 * We need to do this whether or not the work request was signaled. For this
 4038 * purpose we call this function from the condition that checks if a WC
 4039 * should be skipped, to make sure we don't miss it (possibly this FMR
 4040 * operation was not signaled).
 4041 */
4042static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4043{
4044 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4045 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4046}
4047
4048static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4049 struct qedr_cq *cq, int num_entries,
4050 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4051 int force)
4052{
4053 u16 cnt = 0;
4054
4055 while (num_entries && qp->sq.wqe_cons != hw_cons) {
4056 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4057 qedr_chk_if_fmr(qp);
4058 /* skip WC */
4059 goto next_cqe;
4060 }
4061
4062 /* fill WC */
4063 wc->status = status;
27035a1b 4064 wc->vendor_err = 0;
afa0e13b
RA
4065 wc->wc_flags = 0;
4066 wc->src_qp = qp->id;
4067 wc->qp = &qp->ibqp;
4068
4069 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4070 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4071
4072 switch (wc->opcode) {
4073 case IB_WC_RDMA_WRITE:
4074 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4075 break;
4076 case IB_WC_COMP_SWAP:
4077 case IB_WC_FETCH_ADD:
4078 wc->byte_len = 8;
4079 break;
4080 case IB_WC_REG_MR:
4081 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4082 break;
dac27386
MK
4083 case IB_WC_RDMA_READ:
4084 case IB_WC_SEND:
4085 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4086 break;
afa0e13b
RA
4087 default:
4088 break;
4089 }
4090
4091 num_entries--;
4092 wc++;
4093 cnt++;
4094next_cqe:
4095 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4096 qed_chain_consume(&qp->sq.pbl);
4097 qedr_inc_sw_cons(&qp->sq);
4098 }
4099
4100 return cnt;
4101}
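/* Editor's note: unsignaled WQEs are consumed above without generating a WC
 * (unless a flush forces one), except that REG_MR completions still bump the
 * MR completed counter via qedr_chk_if_fmr().
 */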
4102
4103static int qedr_poll_cq_req(struct qedr_dev *dev,
4104 struct qedr_qp *qp, struct qedr_cq *cq,
4105 int num_entries, struct ib_wc *wc,
4106 struct rdma_cqe_requester *req)
4107{
4108 int cnt = 0;
4109
4110 switch (req->status) {
4111 case RDMA_CQE_REQ_STS_OK:
4112 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4113 IB_WC_SUCCESS, 0);
4114 break;
4115 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
c78c3149 4116 if (qp->state != QED_ROCE_QP_STATE_ERR)
dc728f77
KM
4117 DP_DEBUG(dev, QEDR_MSG_CQ,
4118 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4119 cq->icid, qp->icid);
afa0e13b 4120 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
74c3875c 4121 IB_WC_WR_FLUSH_ERR, 1);
afa0e13b
RA
4122 break;
4123 default:
 4124		/* process all WQEs before the consumer */
4125 qp->state = QED_ROCE_QP_STATE_ERR;
4126 cnt = process_req(dev, qp, cq, num_entries, wc,
4127 req->sq_cons - 1, IB_WC_SUCCESS, 0);
4128 wc += cnt;
4129 /* if we have extra WC fill it with actual error info */
4130 if (cnt < num_entries) {
4131 enum ib_wc_status wc_status;
4132
4133 switch (req->status) {
4134 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4135 DP_ERR(dev,
4136 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4137 cq->icid, qp->icid);
4138 wc_status = IB_WC_BAD_RESP_ERR;
4139 break;
4140 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4141 DP_ERR(dev,
4142 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4143 cq->icid, qp->icid);
4144 wc_status = IB_WC_LOC_LEN_ERR;
4145 break;
4146 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4147 DP_ERR(dev,
4148 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4149 cq->icid, qp->icid);
4150 wc_status = IB_WC_LOC_QP_OP_ERR;
4151 break;
4152 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4153 DP_ERR(dev,
4154 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4155 cq->icid, qp->icid);
4156 wc_status = IB_WC_LOC_PROT_ERR;
4157 break;
4158 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4159 DP_ERR(dev,
4160 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4161 cq->icid, qp->icid);
4162 wc_status = IB_WC_MW_BIND_ERR;
4163 break;
4164 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4165 DP_ERR(dev,
4166 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4167 cq->icid, qp->icid);
4168 wc_status = IB_WC_REM_INV_REQ_ERR;
4169 break;
4170 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4171 DP_ERR(dev,
4172 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4173 cq->icid, qp->icid);
4174 wc_status = IB_WC_REM_ACCESS_ERR;
4175 break;
4176 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4177 DP_ERR(dev,
4178 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4179 cq->icid, qp->icid);
4180 wc_status = IB_WC_REM_OP_ERR;
4181 break;
4182 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4183 DP_ERR(dev,
4184 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4185 cq->icid, qp->icid);
4186 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4187 break;
4188 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4189 DP_ERR(dev,
 4190			       "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4191 cq->icid, qp->icid);
4192 wc_status = IB_WC_RETRY_EXC_ERR;
4193 break;
4194 default:
4195 DP_ERR(dev,
4196 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4197 cq->icid, qp->icid);
4198 wc_status = IB_WC_GENERAL_ERR;
4199 }
4200 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4201 wc_status, 1);
4202 }
4203 }
4204
4205 return cnt;
4206}
4207
b6acd71f 4208static inline int qedr_cqe_resp_status_to_ib(u8 status)
afa0e13b 4209{
b6acd71f 4210 switch (status) {
afa0e13b 4211 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
b6acd71f 4212 return IB_WC_LOC_ACCESS_ERR;
afa0e13b 4213 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
b6acd71f 4214 return IB_WC_LOC_LEN_ERR;
afa0e13b 4215 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
b6acd71f 4216 return IB_WC_LOC_QP_OP_ERR;
afa0e13b 4217 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
b6acd71f 4218 return IB_WC_LOC_PROT_ERR;
afa0e13b 4219 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
b6acd71f 4220 return IB_WC_MW_BIND_ERR;
afa0e13b 4221 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
b6acd71f 4222 return IB_WC_REM_INV_RD_REQ_ERR;
afa0e13b 4223 case RDMA_CQE_RESP_STS_OK:
b6acd71f
AR
4224 return IB_WC_SUCCESS;
4225 default:
4226 return IB_WC_GENERAL_ERR;
4227 }
4228}
afa0e13b 4229
b6acd71f
AR
4230static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4231 struct ib_wc *wc)
4232{
4233 wc->status = IB_WC_SUCCESS;
4234 wc->byte_len = le32_to_cpu(resp->length);
afa0e13b 4235
b6acd71f 4236 if (resp->flags & QEDR_RESP_IMM) {
7bed7ebc 4237 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
b6acd71f
AR
4238 wc->wc_flags |= IB_WC_WITH_IMM;
4239
4240 if (resp->flags & QEDR_RESP_RDMA)
afa0e13b
RA
4241 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4242
b6acd71f
AR
4243 if (resp->flags & QEDR_RESP_INV)
4244 return -EINVAL;
4245
4246 } else if (resp->flags & QEDR_RESP_INV) {
4247 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4248 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4249
4250 if (resp->flags & QEDR_RESP_RDMA)
4251 return -EINVAL;
4252
4253 } else if (resp->flags & QEDR_RESP_RDMA) {
4254 return -EINVAL;
4255 }
4256
4257 return 0;
4258}
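/* Editor's note: the -EINVAL returns above reject responder-CQE flag
 * combinations that have no IB verbs equivalent: immediate data together with
 * an invalidate, an invalidate carried on an RDMA write, and an RDMA-write
 * indication without immediate data.
 */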
4259
4260static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4261 struct qedr_cq *cq, struct ib_wc *wc,
4262 struct rdma_cqe_responder *resp, u64 wr_id)
4263{
4264 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4265 wc->opcode = IB_WC_RECV;
4266 wc->wc_flags = 0;
4267
4268 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4269 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4270 DP_ERR(dev,
4271 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4272 cq, cq->icid, resp->flags);
4273
4274 } else {
4275 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4276 if (wc->status == IB_WC_GENERAL_ERR)
4277 DP_ERR(dev,
4278 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4279 cq, cq->icid, resp->status);
afa0e13b
RA
4280 }
4281
b6acd71f 4282 /* Fill the rest of the WC */
27035a1b 4283 wc->vendor_err = 0;
afa0e13b
RA
4284 wc->src_qp = qp->id;
4285 wc->qp = &qp->ibqp;
4286 wc->wr_id = wr_id;
4287}
4288
3491c9e7
YB
4289static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4290 struct qedr_cq *cq, struct ib_wc *wc,
4291 struct rdma_cqe_responder *resp)
4292{
4293 struct qedr_srq *srq = qp->srq;
4294 u64 wr_id;
4295
4296 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4297 le32_to_cpu(resp->srq_wr_id.lo), u64);
4298
4299 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4300 wc->status = IB_WC_WR_FLUSH_ERR;
4301 wc->vendor_err = 0;
4302 wc->wr_id = wr_id;
4303 wc->byte_len = 0;
4304 wc->src_qp = qp->id;
4305 wc->qp = &qp->ibqp;
4306 wc->wr_id = wr_id;
4307 } else {
4308 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4309 }
acca72e2 4310 atomic_inc(&srq->hw_srq.wr_cons_cnt);
3491c9e7
YB
4311
4312 return 1;
4313}
afa0e13b
RA
4314static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4315 struct qedr_cq *cq, struct ib_wc *wc,
4316 struct rdma_cqe_responder *resp)
4317{
4318 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4319
4320 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4321
4322 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4323 qed_chain_consume(&qp->rq.pbl);
4324 qedr_inc_sw_cons(&qp->rq);
4325
4326 return 1;
4327}
4328
4329static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4330 int num_entries, struct ib_wc *wc, u16 hw_cons)
4331{
4332 u16 cnt = 0;
4333
4334 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4335 /* fill WC */
4336 wc->status = IB_WC_WR_FLUSH_ERR;
27035a1b 4337 wc->vendor_err = 0;
afa0e13b
RA
4338 wc->wc_flags = 0;
4339 wc->src_qp = qp->id;
4340 wc->byte_len = 0;
4341 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4342 wc->qp = &qp->ibqp;
4343 num_entries--;
4344 wc++;
4345 cnt++;
4346 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4347 qed_chain_consume(&qp->rq.pbl);
4348 qedr_inc_sw_cons(&qp->rq);
4349 }
4350
4351 return cnt;
4352}
4353
4354static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4355 struct rdma_cqe_responder *resp, int *update)
4356{
50bc60cb 4357 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
afa0e13b
RA
4358 consume_cqe(cq);
4359 *update |= 1;
4360 }
4361}
4362
3491c9e7
YB
4363static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4364 struct qedr_cq *cq, int num_entries,
4365 struct ib_wc *wc,
4366 struct rdma_cqe_responder *resp)
4367{
4368 int cnt;
4369
4370 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4371 consume_cqe(cq);
4372
4373 return cnt;
4374}
4375
afa0e13b
RA
4376static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4377 struct qedr_cq *cq, int num_entries,
4378 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4379 int *update)
4380{
4381 int cnt;
4382
4383 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4384 cnt = process_resp_flush(qp, cq, num_entries, wc,
50bc60cb 4385 resp->rq_cons_or_srq_id);
afa0e13b
RA
4386 try_consume_resp_cqe(cq, qp, resp, update);
4387 } else {
4388 cnt = process_resp_one(dev, qp, cq, wc, resp);
4389 consume_cqe(cq);
4390 *update |= 1;
4391 }
4392
4393 return cnt;
4394}
4395
4396static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4397 struct rdma_cqe_requester *req, int *update)
4398{
4399 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4400 consume_cqe(cq);
4401 *update |= 1;
4402 }
4403}
4404
4405int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4406{
4407 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4408 struct qedr_cq *cq = get_qedr_cq(ibcq);
e3fd112c 4409 union rdma_cqe *cqe;
afa0e13b
RA
4410 u32 old_cons, new_cons;
4411 unsigned long flags;
4412 int update = 0;
4413 int done = 0;
4414
4dd72636
AR
4415 if (cq->destroyed) {
4416 DP_ERR(dev,
4417 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4418 cq, cq->icid);
4419 return 0;
4420 }
4421
04886779
RA
4422 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4423 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4424
afa0e13b 4425 spin_lock_irqsave(&cq->cq_lock, flags);
e3fd112c 4426 cqe = cq->latest_cqe;
afa0e13b
RA
4427 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4428 while (num_entries && is_valid_cqe(cq, cqe)) {
4429 struct qedr_qp *qp;
4430 int cnt = 0;
4431
4432 /* prevent speculative reads of any field of CQE */
4433 rmb();
4434
4435 qp = cqe_get_qp(cqe);
4436 if (!qp) {
4437 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4438 break;
4439 }
4440
4441 wc->qp = &qp->ibqp;
4442
4443 switch (cqe_get_type(cqe)) {
4444 case RDMA_CQE_TYPE_REQUESTER:
4445 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4446 &cqe->req);
4447 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4448 break;
4449 case RDMA_CQE_TYPE_RESPONDER_RQ:
4450 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4451 &cqe->resp, &update);
4452 break;
3491c9e7
YB
4453 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4454 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4455 wc, &cqe->resp);
4456 update = 1;
4457 break;
afa0e13b
RA
4458 case RDMA_CQE_TYPE_INVALID:
4459 default:
4460 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4461 cqe_get_type(cqe));
4462 }
4463 num_entries -= cnt;
4464 wc += cnt;
4465 done += cnt;
4466
4467 cqe = get_cqe(cq);
4468 }
4469 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4470
4471 cq->cq_cons += new_cons - old_cons;
4472
4473 if (update)
 4474		/* The doorbell notifies about the latest VALID entry,
 4475		 * but the chain already points to the next INVALID one
4476 */
4477 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4478
4479 spin_unlock_irqrestore(&cq->cq_lock, flags);
4480 return done;
4481}
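/* Usage sketch (editor's illustration, not part of the driver): completions
 * produced here are normally reaped through the core verbs wrapper, e.g.:
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *	for (i = 0; i < n; i++)
 *		handle_completion(wc[i].wr_id, wc[i].status);
 *
 * where handle_completion() is a hypothetical consumer callback.
 */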
993d1b52
RA
4482
4483int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
1fb7f897 4484 u32 port_num, const struct ib_wc *in_wc,
e26e7b88
LR
4485 const struct ib_grh *in_grh, const struct ib_mad *in,
4486 struct ib_mad *out_mad, size_t *out_mad_size,
4487 u16 *out_mad_pkey_index)
993d1b52 4488{
993d1b52
RA
4489 return IB_MAD_RESULT_SUCCESS;
4490}