1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
46#include "qedr_hsi.h"
47#include <linux/qed/qed_if.h>
48#include "qedr.h"
49#include "verbs.h"
50#include <rdma/qedr-abi.h>
51#include "qedr_cm.h"
52
53#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
54
55int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
56{
57 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
58 return -EINVAL;
59
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
61 return 0;
62}
63
64int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
65 union ib_gid *sgid)
66{
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
68 int rc = 0;
69
70 if (!rdma_cap_roce_gid_table(ibdev, port))
71 return -ENODEV;
72
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
74 if (rc == -EAGAIN) {
75 memcpy(sgid, &zgid, sizeof(*sgid));
76 return 0;
77 }
78
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
81
82 return rc;
83}
84
85int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
88{
89 if (!rdma_cap_roce_gid_table(device, port_num))
90 return -EINVAL;
91
92 if (port_num > QEDR_MAX_PORT)
93 return -EINVAL;
94
95 if (!context)
96 return -EINVAL;
97
98 return 0;
99}
100
101int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
103{
104 if (!rdma_cap_roce_gid_table(device, port_num))
105 return -EINVAL;
106
107 if (port_num > QEDR_MAX_PORT)
108 return -EINVAL;
109
110 if (!context)
111 return -EINVAL;
112
113 return 0;
114}
115
116int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
118{
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
121
122 if (!dev->rdma_ctx) {
123 DP_ERR(dev,
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
125 dev->rdma_ctx);
126 return -EINVAL;
127 }
128
129 memset(attr, 0, sizeof(*attr));
130
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
143
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
159
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
163
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
168
169 return 0;
170}
171
172#define QEDR_SPEED_SDR (1)
173#define QEDR_SPEED_DDR (2)
174#define QEDR_SPEED_QDR (4)
175#define QEDR_SPEED_FDR10 (8)
176#define QEDR_SPEED_FDR (16)
177#define QEDR_SPEED_EDR (32)
178
179static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
180 u8 *ib_width)
181{
182 switch (speed) {
183 case 1000:
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
186 break;
187 case 10000:
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
190 break;
191
192 case 20000:
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
195 break;
196
197 case 25000:
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
200 break;
201
202 case 40000:
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
205 break;
206
207 case 50000:
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
210 break;
211
212 case 100000:
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
215 break;
216
217 default:
218 /* Unsupported */
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
221 }
222}
223
224int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
225{
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
228
229 dev = get_qedr_dev(ibdev);
230 if (port > 1) {
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
232 return -EINVAL;
233 }
234
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
237 return -EINVAL;
238 }
239
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241 memset(attr, 0, sizeof(*attr));
242
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
246 } else {
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
249 }
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
252 attr->lid = 0;
253 attr->lmc = 0;
254 attr->sm_lid = 0;
255 attr->sm_sl = 0;
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
265
266 return 0;
267}
268
269int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
271{
272 struct qedr_dev *dev;
273
274 dev = get_qedr_dev(ibdev);
275 if (port > 1) {
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
277 return -EINVAL;
278 }
279
280 return 0;
281}
282
283static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
284 unsigned long len)
285{
286 struct qedr_mm *mm;
287
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
289 if (!mm)
290 return -ENOMEM;
291
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to PAGE_SIZE.
299 */
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
302
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
306
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
311
312 return 0;
313}
314
315static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
316 unsigned long len)
317{
318 bool found = false;
319 struct qedr_mm *mm;
320
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
324 continue;
325
326 found = true;
327 break;
328 }
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
333
334 return found;
335}
336
337struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
339{
340 int rc;
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
345
346 if (!udata)
347 return ERR_PTR(-EFAULT);
348
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
350 if (!ctx)
351 return ERR_PTR(-ENOMEM);
352
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
354 if (rc) {
355 DP_ERR(dev,
356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
357 rc);
358 goto err;
359 }
360
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
367
368 memset(&uresp, 0, sizeof(uresp));
369
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
379
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
381 if (rc)
382 goto err;
383
384 ctx->dev = dev;
385
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387 if (rc)
388 goto err;
389
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
391 &ctx->ibucontext);
392 return &ctx->ibucontext;
393
394err:
395 kfree(ctx);
396 return ERR_PTR(rc);
397}
398
399int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
400{
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
403 int status = 0;
404
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
406 uctx);
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
408
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
414 kfree(mm);
415 }
416
417 kfree(uctx);
418 return status;
419}
420
421int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
422{
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
428 int rc = 0;
429 bool found;
430
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
436 vma->vm_start);
437 return -EINVAL;
438 }
439
440 found = qedr_search_mmap(ucontext, vm_page, len);
441 if (!found) {
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
443 vma->vm_pgoff);
444 return -EINVAL;
445 }
446
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
448
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
450 dev->db_size))) {
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
454 return -EPERM;
455 }
456
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
458
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
461 } else {
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
465 }
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
467 return rc;
468}
469
470struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd;
477 u16 pd_id;
478 int rc;
479
480 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
481 (udata && context) ? "User Lib" : "Kernel");
482
483 if (!dev->rdma_ctx) {
484 DP_ERR(dev, "invalid RDMA context\n");
485 return ERR_PTR(-EINVAL);
486 }
487
488 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
489 if (!pd)
490 return ERR_PTR(-ENOMEM);
491
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id;
496
497 if (udata && context) {
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc)
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context);
502 uctx->pd = pd;
503 pd->uctx = uctx;
504 }
505
506 return &pd->ibpd;
507}
508
509int qedr_dealloc_pd(struct ib_pd *ibpd)
510{
511 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
512 struct qedr_pd *pd = get_qedr_pd(ibpd);
513
514 if (!pd) {
515 pr_err("Invalid PD received in dealloc_pd\n");
516 return -EINVAL;
517 }
518
519 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
520 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
521
522 kfree(pd);
523
524 return 0;
525}
526
527static void qedr_free_pbl(struct qedr_dev *dev,
528 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
529{
530 struct pci_dev *pdev = dev->pdev;
531 int i;
532
533 for (i = 0; i < pbl_info->num_pbls; i++) {
534 if (!pbl[i].va)
535 continue;
536 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
537 pbl[i].va, pbl[i].pa);
538 }
539
540 kfree(pbl);
541}
542
543#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
544#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
545
546#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
547#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
548#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
549
550static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
551 struct qedr_pbl_info *pbl_info,
552 gfp_t flags)
553{
554 struct pci_dev *pdev = dev->pdev;
555 struct qedr_pbl *pbl_table;
556 dma_addr_t *pbl_main_tbl;
557 dma_addr_t pa;
558 void *va;
559 int i;
560
561 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
562 if (!pbl_table)
563 return ERR_PTR(-ENOMEM);
564
565 for (i = 0; i < pbl_info->num_pbls; i++) {
566 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
567 &pa, flags);
568 if (!va)
569 goto err;
570
571 memset(va, 0, pbl_info->pbl_size);
572 pbl_table[i].va = va;
573 pbl_table[i].pa = pa;
574 }
575
576 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
577 * the first one with physical pointers to all of the rest
578 */
579 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
580 for (i = 0; i < pbl_info->num_pbls - 1; i++)
581 pbl_main_tbl[i] = pbl_table[i + 1].pa;
582
583 return pbl_table;
584
585err:
586 for (i--; i >= 0; i--)
587 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
588 pbl_table[i].va, pbl_table[i].pa);
589
590 qedr_free_pbl(dev, pbl_info, pbl_table);
591
592 return ERR_PTR(-ENOMEM);
593}
594
595static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
596 struct qedr_pbl_info *pbl_info,
597 u32 num_pbes, int two_layer_capable)
598{
599 u32 pbl_capacity;
600 u32 pbl_size;
601 u32 num_pbls;
602
603 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
604 if (num_pbes > MAX_PBES_TWO_LAYER) {
605 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
606 num_pbes);
607 return -EINVAL;
608 }
609
610 /* calculate required pbl page size */
611 pbl_size = MIN_FW_PBL_PAGE_SIZE;
612 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
613 NUM_PBES_ON_PAGE(pbl_size);
614
615 while (pbl_capacity < num_pbes) {
616 pbl_size *= 2;
617 pbl_capacity = pbl_size / sizeof(u64);
618 pbl_capacity = pbl_capacity * pbl_capacity;
619 }
620
621 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
622 num_pbls++; /* One for the layer0 ( points to the pbls) */
623 pbl_info->two_layered = true;
624 } else {
625 /* One layered PBL */
626 num_pbls = 1;
627 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
628 roundup_pow_of_two((num_pbes * sizeof(u64))));
629 pbl_info->two_layered = false;
630 }
631
632 pbl_info->num_pbls = num_pbls;
633 pbl_info->pbl_size = pbl_size;
634 pbl_info->num_pbes = num_pbes;
635
636 DP_DEBUG(dev, QEDR_MSG_MR,
637 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
638 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
639
640 return 0;
641}
642
643static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
644 struct qedr_pbl *pbl,
645 struct qedr_pbl_info *pbl_info)
646{
647 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
648 struct qedr_pbl *pbl_tbl;
649 struct scatterlist *sg;
650 struct regpair *pbe;
651 int entry;
652 u32 addr;
653
654 if (!pbl_info->num_pbes)
655 return;
656
657 /* If we have a two layered pbl, the first pbl points to the rest
658 * of the pbls and the first entry lays on the second pbl in the table
659 */
660 if (pbl_info->two_layered)
661 pbl_tbl = &pbl[1];
662 else
663 pbl_tbl = pbl;
664
665 pbe = (struct regpair *)pbl_tbl->va;
666 if (!pbe) {
667 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
668 return;
669 }
670
671 pbe_cnt = 0;
672
673 shift = ilog2(umem->page_size);
674
675 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
676 pages = sg_dma_len(sg) >> shift;
677 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
678 /* store the page address in pbe */
679 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
680 umem->page_size * pg_cnt);
681 addr = upper_32_bits(sg_dma_address(sg) +
682 umem->page_size * pg_cnt);
683 pbe->hi = cpu_to_le32(addr);
684 pbe_cnt++;
685 total_num_pbes++;
686 pbe++;
687
688 if (total_num_pbes == pbl_info->num_pbes)
689 return;
690
691 /* If the given pbl is full storing the pbes,
692 * move to next pbl.
693 */
694 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
695 pbl_tbl++;
696 pbe = (struct regpair *)pbl_tbl->va;
697 pbe_cnt = 0;
698 }
699 }
700 }
701}
702
703static int qedr_copy_cq_uresp(struct qedr_dev *dev,
704 struct qedr_cq *cq, struct ib_udata *udata)
705{
706 struct qedr_create_cq_uresp uresp;
707 int rc;
708
709 memset(&uresp, 0, sizeof(uresp));
710
711 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
712 uresp.icid = cq->icid;
713
714 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
715 if (rc)
716 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
717
718 return rc;
719}
720
721static void consume_cqe(struct qedr_cq *cq)
722{
723 if (cq->latest_cqe == cq->toggle_cqe)
724 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
725
726 cq->latest_cqe = qed_chain_consume(&cq->pbl);
727}
728
729static inline int qedr_align_cq_entries(int entries)
730{
731 u64 size, aligned_size;
732
733 /* We allocate an extra entry that we don't report to the FW. */
734 size = (entries + 1) * QEDR_CQE_SIZE;
735 aligned_size = ALIGN(size, PAGE_SIZE);
736
737 return aligned_size / QEDR_CQE_SIZE;
738}
739
740static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
741 struct qedr_dev *dev,
742 struct qedr_userq *q,
743 u64 buf_addr, size_t buf_len,
744 int access, int dmasync)
745{
746 int page_cnt;
747 int rc;
748
749 q->buf_addr = buf_addr;
750 q->buf_len = buf_len;
751 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
752 if (IS_ERR(q->umem)) {
753 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
754 PTR_ERR(q->umem));
755 return PTR_ERR(q->umem);
756 }
757
758 page_cnt = ib_umem_page_count(q->umem);
759 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
760 if (rc)
761 goto err0;
762
763 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
764 if (IS_ERR_OR_NULL(q->pbl_tbl))
765 goto err0;
766
767 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
768
769 return 0;
770
771err0:
772 ib_umem_release(q->umem);
773
774 return rc;
775}
776
777static inline void qedr_init_cq_params(struct qedr_cq *cq,
778 struct qedr_ucontext *ctx,
779 struct qedr_dev *dev, int vector,
780 int chain_entries, int page_cnt,
781 u64 pbl_ptr,
782 struct qed_rdma_create_cq_in_params
783 *params)
784{
785 memset(params, 0, sizeof(*params));
786 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
787 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
788 params->cnq_id = vector;
789 params->cq_size = chain_entries - 1;
790 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
791 params->pbl_num_pages = page_cnt;
792 params->pbl_ptr = pbl_ptr;
793 params->pbl_two_level = 0;
794}
795
796static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
797{
798 /* Flush data before signalling doorbell */
799 wmb();
800 cq->db.data.agg_flags = flags;
801 cq->db.data.value = cpu_to_le32(cons);
802 writeq(cq->db.raw, cq->db_addr);
803
804 /* Make sure write would stick */
805 mmiowb();
806}
807
808int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
809{
810 struct qedr_cq *cq = get_qedr_cq(ibcq);
811 unsigned long sflags;
812
813 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
814 return 0;
815
816 spin_lock_irqsave(&cq->cq_lock, sflags);
817
818 cq->arm_flags = 0;
819
820 if (flags & IB_CQ_SOLICITED)
821 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
822
823 if (flags & IB_CQ_NEXT_COMP)
824 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
825
826 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
827
828 spin_unlock_irqrestore(&cq->cq_lock, sflags);
829
830 return 0;
831}
832
833struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
834 const struct ib_cq_init_attr *attr,
835 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
836{
837 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
838 struct qed_rdma_destroy_cq_out_params destroy_oparams;
839 struct qed_rdma_destroy_cq_in_params destroy_iparams;
840 struct qedr_dev *dev = get_qedr_dev(ibdev);
841 struct qed_rdma_create_cq_in_params params;
842 struct qedr_create_cq_ureq ureq;
843 int vector = attr->comp_vector;
844 int entries = attr->cqe;
845 struct qedr_cq *cq;
846 int chain_entries;
847 int page_cnt;
848 u64 pbl_ptr;
849 u16 icid;
850 int rc;
851
852 DP_DEBUG(dev, QEDR_MSG_INIT,
853 "create_cq: called from %s. entries=%d, vector=%d\n",
854 udata ? "User Lib" : "Kernel", entries, vector);
855
856 if (entries > QEDR_MAX_CQES) {
857 DP_ERR(dev,
858 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
859 entries, QEDR_MAX_CQES);
860 return ERR_PTR(-EINVAL);
861 }
862
863 chain_entries = qedr_align_cq_entries(entries);
864 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
865
866 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
867 if (!cq)
868 return ERR_PTR(-ENOMEM);
869
870 if (udata) {
871 memset(&ureq, 0, sizeof(ureq));
872 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
873 DP_ERR(dev,
874 "create cq: problem copying data from user space\n");
875 goto err0;
876 }
877
878 if (!ureq.len) {
879 DP_ERR(dev,
880 "create cq: cannot create a cq with 0 entries\n");
881 goto err0;
882 }
883
884 cq->cq_type = QEDR_CQ_TYPE_USER;
885
886 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
887 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
888 if (rc)
889 goto err0;
890
891 pbl_ptr = cq->q.pbl_tbl->pa;
892 page_cnt = cq->q.pbl_info.num_pbes;
893
894 cq->ibcq.cqe = chain_entries;
895 } else {
896 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
897
898 rc = dev->ops->common->chain_alloc(dev->cdev,
899 QED_CHAIN_USE_TO_CONSUME,
900 QED_CHAIN_MODE_PBL,
901 QED_CHAIN_CNT_TYPE_U32,
902 chain_entries,
903 sizeof(union rdma_cqe),
904 &cq->pbl);
905 if (rc)
906 goto err1;
907
908 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
909 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
910 cq->ibcq.cqe = cq->pbl.capacity;
911 }
912
913 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
914 pbl_ptr, &params);
915
916 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
917 if (rc)
918 goto err2;
919
920 cq->icid = icid;
921 cq->sig = QEDR_CQ_MAGIC_NUMBER;
922 spin_lock_init(&cq->cq_lock);
923
924 if (ib_ctx) {
925 rc = qedr_copy_cq_uresp(dev, cq, udata);
926 if (rc)
927 goto err3;
928 } else {
929 /* Generate doorbell address. */
930 cq->db_addr = dev->db_addr +
931 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
932 cq->db.data.icid = cq->icid;
933 cq->db.data.params = DB_AGG_CMD_SET <<
934 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
935
936 /* point to the very last element, passing it we will toggle */
937 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
938 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
939 cq->latest_cqe = NULL;
940 consume_cqe(cq);
941 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
942 }
943
944 DP_DEBUG(dev, QEDR_MSG_CQ,
945 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
946 cq->icid, cq, params.cq_size);
947
948 return &cq->ibcq;
949
950err3:
951 destroy_iparams.icid = cq->icid;
952 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
953 &destroy_oparams);
954err2:
955 if (udata)
956 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
957 else
958 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
959err1:
960 if (udata)
961 ib_umem_release(cq->q.umem);
962err0:
963 kfree(cq);
964 return ERR_PTR(-EINVAL);
965}
966
967int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
968{
969 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
970 struct qedr_cq *cq = get_qedr_cq(ibcq);
971
972 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
973
974 return 0;
975}
976
977int qedr_destroy_cq(struct ib_cq *ibcq)
978{
979 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
980 struct qed_rdma_destroy_cq_out_params oparams;
981 struct qed_rdma_destroy_cq_in_params iparams;
982 struct qedr_cq *cq = get_qedr_cq(ibcq);
983
984 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
985
986 /* GSIs CQs are handled by driver, so they don't exist in the FW */
987 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
988 int rc;
989
990 iparams.icid = cq->icid;
991 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
992 &oparams);
993 if (rc)
994 return rc;
995 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
996 }
997
998 if (ibcq->uobject && ibcq->uobject->context) {
999 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1000 ib_umem_release(cq->q.umem);
1001 }
1002
1003 kfree(cq);
1004
1005 return 0;
1006}
1007
1008static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1009 struct ib_qp_attr *attr,
1010 int attr_mask,
1011 struct qed_rdma_modify_qp_in_params
1012 *qp_params)
1013{
1014 enum rdma_network_type nw_type;
1015 struct ib_gid_attr gid_attr;
1016 union ib_gid gid;
1017 u32 ipv4_addr;
1018 int rc = 0;
1019 int i;
1020
1021 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1022 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1023 if (rc)
1024 return rc;
1025
1026 if (!memcmp(&gid, &zgid, sizeof(gid)))
1027 return -ENOENT;
1028
1029 if (gid_attr.ndev) {
1030 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1031
1032 dev_put(gid_attr.ndev);
1033 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1034 switch (nw_type) {
1035 case RDMA_NETWORK_IPV6:
1036 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1037 sizeof(qp_params->sgid));
1038 memcpy(&qp_params->dgid.bytes[0],
1039 &attr->ah_attr.grh.dgid,
1040 sizeof(qp_params->dgid));
1041 qp_params->roce_mode = ROCE_V2_IPV6;
1042 SET_FIELD(qp_params->modify_flags,
1043 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1044 break;
1045 case RDMA_NETWORK_IB:
1046 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1047 sizeof(qp_params->sgid));
1048 memcpy(&qp_params->dgid.bytes[0],
1049 &attr->ah_attr.grh.dgid,
1050 sizeof(qp_params->dgid));
1051 qp_params->roce_mode = ROCE_V1;
1052 break;
1053 case RDMA_NETWORK_IPV4:
1054 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1055 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1056 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1057 qp_params->sgid.ipv4_addr = ipv4_addr;
1058 ipv4_addr =
1059 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1060 qp_params->dgid.ipv4_addr = ipv4_addr;
1061 SET_FIELD(qp_params->modify_flags,
1062 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1063 qp_params->roce_mode = ROCE_V2_IPV4;
1064 break;
1065 }
1066 }
1067
1068 for (i = 0; i < 4; i++) {
1069 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1070 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1071 }
1072
1073 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1074 qp_params->vlan_id = 0;
1075
1076 return 0;
1077}
1078
1079static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1080{
1081 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1082 ib_umem_release(qp->usq.umem);
1083}
1084
1085static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1086{
1087 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1088 ib_umem_release(qp->urq.umem);
1089}
1090
1091static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1092{
1093 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1094 kfree(qp->wqe_wr_id);
1095}
1096
1097static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1098{
1099 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1100 kfree(qp->rqe_wr_id);
1101}
1102
1103static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1104 struct ib_qp_init_attr *attrs)
1105{
1106 struct qedr_device_attr *qattr = &dev->attr;
1107
1108 /* QP0... attrs->qp_type == IB_QPT_GSI */
1109 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1110 DP_DEBUG(dev, QEDR_MSG_QP,
1111 "create qp: unsupported qp type=0x%x requested\n",
1112 attrs->qp_type);
1113 return -EINVAL;
1114 }
1115
1116 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1117 DP_ERR(dev,
1118 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1119 attrs->cap.max_send_wr, qattr->max_sqe);
1120 return -EINVAL;
1121 }
1122
1123 if (attrs->cap.max_inline_data > qattr->max_inline) {
1124 DP_ERR(dev,
1125 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1126 attrs->cap.max_inline_data, qattr->max_inline);
1127 return -EINVAL;
1128 }
1129
1130 if (attrs->cap.max_send_sge > qattr->max_sge) {
1131 DP_ERR(dev,
1132 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1133 attrs->cap.max_send_sge, qattr->max_sge);
1134 return -EINVAL;
1135 }
1136
1137 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1138 DP_ERR(dev,
1139 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1140 attrs->cap.max_recv_sge, qattr->max_sge);
1141 return -EINVAL;
1142 }
1143
1144 /* Unprivileged user space cannot create special QP */
1145 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1146 DP_ERR(dev,
1147 "create qp: userspace can't create special QPs of type=0x%x\n",
1148 attrs->qp_type);
1149 return -EINVAL;
1150 }
1151
1152 return 0;
1153}
1154
1155static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1156 struct qedr_qp *qp)
1157{
1158 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1159 uresp->rq_icid = qp->icid;
1160}
1161
1162static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1163 struct qedr_qp *qp)
1164{
1165 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1166 uresp->sq_icid = qp->icid + 1;
1167}
1168
1169static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1170 struct qedr_qp *qp, struct ib_udata *udata)
1171{
1172 struct qedr_create_qp_uresp uresp;
1173 int rc;
1174
1175 memset(&uresp, 0, sizeof(uresp));
1176 qedr_copy_sq_uresp(&uresp, qp);
1177 qedr_copy_rq_uresp(&uresp, qp);
1178
1179 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1180 uresp.qp_id = qp->qp_id;
1181
1182 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1183 if (rc)
1184 DP_ERR(dev,
1185 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1186 qp->icid);
1187
1188 return rc;
1189}
1190
1191static void qedr_set_qp_init_params(struct qedr_dev *dev,
1192 struct qedr_qp *qp,
1193 struct qedr_pd *pd,
1194 struct ib_qp_init_attr *attrs)
1195{
1196 qp->pd = pd;
1197
1198 spin_lock_init(&qp->q_lock);
1199
1200 qp->qp_type = attrs->qp_type;
1201 qp->max_inline_data = attrs->cap.max_inline_data;
1202 qp->sq.max_sges = attrs->cap.max_send_sge;
1203 qp->state = QED_ROCE_QP_STATE_RESET;
1204 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1205 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1206 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1207 qp->dev = dev;
1208
1209 DP_DEBUG(dev, QEDR_MSG_QP,
1210 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1211 pd->pd_id, qp->qp_type, qp->max_inline_data,
1212 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1213 DP_DEBUG(dev, QEDR_MSG_QP,
1214 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1215 qp->sq.max_sges, qp->sq_cq->icid);
1216 qp->rq.max_sges = attrs->cap.max_recv_sge;
1217 DP_DEBUG(dev, QEDR_MSG_QP,
1218 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1219 qp->rq.max_sges, qp->rq_cq->icid);
1220}
1221
1222static inline void
1223qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1224 struct qedr_create_qp_ureq *ureq)
1225{
1226 /* QP handle to be written in CQE */
1227 params->qp_handle_lo = ureq->qp_handle_lo;
1228 params->qp_handle_hi = ureq->qp_handle_hi;
1229}
1230
1231static inline void
1232qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1233{
1234 qp->sq.db = dev->db_addr +
1235 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1236 qp->sq.db_data.data.icid = qp->icid + 1;
1237}
1238
1239static inline void
1240qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1241{
1242 qp->rq.db = dev->db_addr +
1243 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1244 qp->rq.db_data.data.icid = qp->icid;
1245}
1246
1247static inline int
1248qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1249 struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1250{
1251 /* Allocate driver internal RQ array */
1252 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1253 GFP_KERNEL);
1254 if (!qp->rqe_wr_id)
1255 return -ENOMEM;
1256
1257 DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1258
1259 return 0;
1260}
1261
1262static inline int
1263qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1264 struct qedr_qp *qp,
1265 struct ib_qp_init_attr *attrs,
1266 struct qed_rdma_create_qp_in_params *params)
1267{
1268 u32 temp_max_wr;
1269
1270 /* Allocate driver internal SQ array */
1271 temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1272 temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1273
1274 /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
1275 qp->sq.max_wr = (u16)temp_max_wr;
1276 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1277 GFP_KERNEL);
1278 if (!qp->wqe_wr_id)
1279 return -ENOMEM;
1280
1281 DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1282
1283 /* QP handle to be written in CQE */
1284 params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1285 params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1286
1287 return 0;
1288}
1289
1290static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1291 struct qedr_qp *qp,
1292 struct ib_qp_init_attr *attrs)
1293{
1294 u32 n_sq_elems, n_sq_entries;
1295 int rc;
1296
1297 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1298 * the ring. The ring should allow at least a single WR, even if the
1299 * user requested none, due to allocation issues.
1300 */
1301 n_sq_entries = attrs->cap.max_send_wr;
1302 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1303 n_sq_entries = max_t(u32, n_sq_entries, 1);
1304 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1305 rc = dev->ops->common->chain_alloc(dev->cdev,
1306 QED_CHAIN_USE_TO_PRODUCE,
1307 QED_CHAIN_MODE_PBL,
1308 QED_CHAIN_CNT_TYPE_U32,
1309 n_sq_elems,
1310 QEDR_SQE_ELEMENT_SIZE,
1311 &qp->sq.pbl);
1312 if (rc) {
1313 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1314 return rc;
1315 }
1316
1317 DP_DEBUG(dev, QEDR_MSG_SQ,
1318 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1319 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1320 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1321 return 0;
1322}
1323
1324static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1325 struct qedr_qp *qp,
1326 struct ib_qp_init_attr *attrs)
1327{
1328 u32 n_rq_elems, n_rq_entries;
1329 int rc;
1330
1331 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1332 * the ring. The ring should allow at least a single WR, even if the
1333 * user requested none, due to allocation issues.
1334 */
1335 n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1336 n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1337 rc = dev->ops->common->chain_alloc(dev->cdev,
1338 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1339 QED_CHAIN_MODE_PBL,
1340 QED_CHAIN_CNT_TYPE_U32,
1341 n_rq_elems,
1342 QEDR_RQE_ELEMENT_SIZE,
1343 &qp->rq.pbl);
1344
1345 if (rc) {
1346 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1347 return -ENOMEM;
1348 }
1349
1350 DP_DEBUG(dev, QEDR_MSG_RQ,
1351 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1352 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1353 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1354
1355 /* n_rq_entries < u16 so the casting is safe */
1356 qp->rq.max_wr = (u16)n_rq_entries;
1357
1358 return 0;
1359}
1360
1361static inline void
1362qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1363 struct qedr_pd *pd,
1364 struct qedr_qp *qp,
1365 struct ib_qp_init_attr *attrs,
1366 struct ib_udata *udata,
1367 struct qed_rdma_create_qp_in_params *params)
1368{
1369 /* QP handle to be written in an async event */
1370 params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1371 params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1372
1373 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1374 params->fmr_and_reserved_lkey = !udata;
1375 params->pd = pd->pd_id;
1376 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1377 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1378 params->max_sq_sges = 0;
1379 params->stats_queue = 0;
1380
1381 if (udata) {
1382 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1383 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1384 } else {
1385 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1386 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1387 }
1388}
1389
1390static inline void
1391qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1392 struct ib_qp_init_attr *attrs,
1393 struct ib_udata *udata,
1394 struct qed_rdma_create_qp_in_params *params)
1395{
1396 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1397 params->srq_id = 0;
1398 params->use_srq = false;
1399
1400 if (udata) {
1401 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1402 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1403 } else {
1404 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1405 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1406 }
1407}
1408
1409static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1410{
1411 DP_DEBUG(dev, QEDR_MSG_QP,
1412 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1413 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1414 qp->urq.buf_len);
1415}
1416
1417static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1418 struct qedr_dev *dev,
1419 struct qedr_qp *qp,
1420 struct qedr_create_qp_ureq *ureq)
1421{
1422 int rc;
1423
1424 /* SQ - read access only (0), dma sync not required (0) */
1425 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1426 ureq->sq_len, 0, 0);
1427 if (rc)
1428 return rc;
1429
1430 /* RQ - read access only (0), dma sync not required (0) */
1431 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1432 ureq->rq_len, 0, 0);
1433
1434 if (rc)
1435 qedr_cleanup_user_sq(dev, qp);
1436 return rc;
1437}
1438
1439static inline int
1440qedr_init_kernel_qp(struct qedr_dev *dev,
1441 struct qedr_qp *qp,
1442 struct ib_qp_init_attr *attrs,
1443 struct qed_rdma_create_qp_in_params *params)
1444{
1445 int rc;
1446
1447 rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1448 if (rc) {
1449 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1450 return rc;
1451 }
1452
1453 rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1454 if (rc) {
1455 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1456 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1457 return rc;
1458 }
1459
1460 rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1461 if (rc) {
1462 qedr_cleanup_kernel_sq(dev, qp);
1463 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1464 return rc;
1465 }
1466
1467 rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1468 if (rc) {
1469 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1470 qedr_cleanup_kernel_sq(dev, qp);
1471 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1472 return rc;
1473 }
1474
1475 return rc;
1476}
1477
1478struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1479 struct ib_qp_init_attr *attrs,
1480 struct ib_udata *udata)
1481{
1482 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1483 struct qed_rdma_create_qp_out_params out_params;
1484 struct qed_rdma_create_qp_in_params in_params;
1485 struct qedr_pd *pd = get_qedr_pd(ibpd);
1486 struct ib_ucontext *ib_ctx = NULL;
1487 struct qedr_ucontext *ctx = NULL;
1488 struct qedr_create_qp_ureq ureq;
1489 struct qedr_qp *qp;
1490 struct ib_qp *ibqp;
1491 int rc = 0;
1492
1493 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1494 udata ? "user library" : "kernel", pd);
1495
1496 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1497 if (rc)
1498 return ERR_PTR(rc);
1499
1500 if (attrs->srq)
1501 return ERR_PTR(-EINVAL);
1502
1503 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1504 if (!qp)
1505 return ERR_PTR(-ENOMEM);
1506
1507 DP_DEBUG(dev, QEDR_MSG_QP,
1508 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1509 get_qedr_cq(attrs->send_cq),
1510 get_qedr_cq(attrs->send_cq)->icid,
1511 get_qedr_cq(attrs->recv_cq),
1512 get_qedr_cq(attrs->recv_cq)->icid);
1513
1514 qedr_set_qp_init_params(dev, qp, pd, attrs);
1515
1516 if (attrs->qp_type == IB_QPT_GSI) {
1517 if (udata) {
1518 DP_ERR(dev,
1519 "create qp: unexpected udata when creating GSI QP\n");
1520 goto err0;
1521 }
1522 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1523 if (IS_ERR(ibqp))
1524 kfree(qp);
1525 return ibqp;
1526 }
1527
1528 memset(&in_params, 0, sizeof(in_params));
1529
1530 if (udata) {
1531 if (!(udata && ibpd->uobject && ibpd->uobject->context))
1532 goto err0;
1533
1534 ib_ctx = ibpd->uobject->context;
1535 ctx = get_qedr_ucontext(ib_ctx);
1536
1537 memset(&ureq, 0, sizeof(ureq));
1538 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1539 DP_ERR(dev,
1540 "create qp: problem copying data from user space\n");
1541 goto err0;
1542 }
1543
1544 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1545 if (rc)
1546 goto err0;
1547
1548 qedr_init_qp_user_params(&in_params, &ureq);
1549 } else {
1550 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1551 if (rc)
1552 goto err0;
1553 }
1554
1555 qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1556 qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1557
1558 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1559 &in_params, &out_params);
1560
1561 if (!qp->qed_qp)
1562 goto err1;
1563
1564 qp->qp_id = out_params.qp_id;
1565 qp->icid = out_params.icid;
1566 qp->ibqp.qp_num = qp->qp_id;
1567
1568 if (udata) {
1569 rc = qedr_copy_qp_uresp(dev, qp, udata);
1570 if (rc)
1571 goto err2;
1572
1573 qedr_qp_user_print(dev, qp);
1574 } else {
1575 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1576 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1577 }
1578
1579 DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1580 udata ? "user" : "kernel", qp);
1581
1582 return &qp->ibqp;
1583
1584err2:
1585 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1586 if (rc)
1587 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1588err1:
1589 if (udata) {
1590 qedr_cleanup_user_sq(dev, qp);
1591 qedr_cleanup_user_rq(dev, qp);
1592 } else {
1593 qedr_cleanup_kernel_sq(dev, qp);
1594 qedr_cleanup_kernel_rq(dev, qp);
1595 }
1596
1597err0:
1598 kfree(qp);
1599
1600 return ERR_PTR(-EFAULT);
1601}
1602
1603enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604{
1605 switch (qp_state) {
1606 case QED_ROCE_QP_STATE_RESET:
1607 return IB_QPS_RESET;
1608 case QED_ROCE_QP_STATE_INIT:
1609 return IB_QPS_INIT;
1610 case QED_ROCE_QP_STATE_RTR:
1611 return IB_QPS_RTR;
1612 case QED_ROCE_QP_STATE_RTS:
1613 return IB_QPS_RTS;
1614 case QED_ROCE_QP_STATE_SQD:
1615 return IB_QPS_SQD;
1616 case QED_ROCE_QP_STATE_ERR:
1617 return IB_QPS_ERR;
1618 case QED_ROCE_QP_STATE_SQE:
1619 return IB_QPS_SQE;
1620 }
1621 return IB_QPS_ERR;
1622}
1623
1624enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
1625{
1626 switch (qp_state) {
1627 case IB_QPS_RESET:
1628 return QED_ROCE_QP_STATE_RESET;
1629 case IB_QPS_INIT:
1630 return QED_ROCE_QP_STATE_INIT;
1631 case IB_QPS_RTR:
1632 return QED_ROCE_QP_STATE_RTR;
1633 case IB_QPS_RTS:
1634 return QED_ROCE_QP_STATE_RTS;
1635 case IB_QPS_SQD:
1636 return QED_ROCE_QP_STATE_SQD;
1637 case IB_QPS_ERR:
1638 return QED_ROCE_QP_STATE_ERR;
1639 default:
1640 return QED_ROCE_QP_STATE_ERR;
1641 }
1642}
1643
1644static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1645{
1646 qed_chain_reset(&qph->pbl);
1647 qph->prod = 0;
1648 qph->cons = 0;
1649 qph->wqe_cons = 0;
1650 qph->db_data.data.value = cpu_to_le16(0);
1651}
1652
1653static int qedr_update_qp_state(struct qedr_dev *dev,
1654 struct qedr_qp *qp,
1655 enum qed_roce_qp_state new_state)
1656{
1657 int status = 0;
1658
1659 if (new_state == qp->state)
1660 return 1;
1661
1662 switch (qp->state) {
1663 case QED_ROCE_QP_STATE_RESET:
1664 switch (new_state) {
1665 case QED_ROCE_QP_STATE_INIT:
1666 qp->prev_wqe_size = 0;
1667 qedr_reset_qp_hwq_info(&qp->sq);
1668 qedr_reset_qp_hwq_info(&qp->rq);
1669 break;
1670 default:
1671 status = -EINVAL;
1672 break;
1673 };
1674 break;
1675 case QED_ROCE_QP_STATE_INIT:
1676 switch (new_state) {
1677 case QED_ROCE_QP_STATE_RTR:
1678 /* Update doorbell (in case post_recv was
1679 * done before move to RTR)
1680 */
1681 wmb();
1682 writel(qp->rq.db_data.raw, qp->rq.db);
1683 /* Make sure write takes effect */
1684 mmiowb();
1685 break;
1686 case QED_ROCE_QP_STATE_ERR:
1687 break;
1688 default:
1689 /* Invalid state change. */
1690 status = -EINVAL;
1691 break;
1692 };
1693 break;
1694 case QED_ROCE_QP_STATE_RTR:
1695 /* RTR->XXX */
1696 switch (new_state) {
1697 case QED_ROCE_QP_STATE_RTS:
1698 break;
1699 case QED_ROCE_QP_STATE_ERR:
1700 break;
1701 default:
1702 /* Invalid state change. */
1703 status = -EINVAL;
1704 break;
1705 };
1706 break;
1707 case QED_ROCE_QP_STATE_RTS:
1708 /* RTS->XXX */
1709 switch (new_state) {
1710 case QED_ROCE_QP_STATE_SQD:
1711 break;
1712 case QED_ROCE_QP_STATE_ERR:
1713 break;
1714 default:
1715 /* Invalid state change. */
1716 status = -EINVAL;
1717 break;
1718 };
1719 break;
1720 case QED_ROCE_QP_STATE_SQD:
1721 /* SQD->XXX */
1722 switch (new_state) {
1723 case QED_ROCE_QP_STATE_RTS:
1724 case QED_ROCE_QP_STATE_ERR:
1725 break;
1726 default:
1727 /* Invalid state change. */
1728 status = -EINVAL;
1729 break;
1730 };
1731 break;
1732 case QED_ROCE_QP_STATE_ERR:
1733 /* ERR->XXX */
1734 switch (new_state) {
1735 case QED_ROCE_QP_STATE_RESET:
1736 break;
1737 default:
1738 status = -EINVAL;
1739 break;
1740 };
1741 break;
1742 default:
1743 status = -EINVAL;
1744 break;
1745 };
1746
1747 return status;
1748}
1749
1750int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1751 int attr_mask, struct ib_udata *udata)
1752{
1753 struct qedr_qp *qp = get_qedr_qp(ibqp);
1754 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1755 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1756 enum ib_qp_state old_qp_state, new_qp_state;
1757 int rc = 0;
1758
1759 DP_DEBUG(dev, QEDR_MSG_QP,
1760 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1761 attr->qp_state);
1762
1763 old_qp_state = qedr_get_ibqp_state(qp->state);
1764 if (attr_mask & IB_QP_STATE)
1765 new_qp_state = attr->qp_state;
1766 else
1767 new_qp_state = old_qp_state;
1768
1769 if (!ib_modify_qp_is_ok
1770 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1771 IB_LINK_LAYER_ETHERNET)) {
1772 DP_ERR(dev,
1773 "modify qp: invalid attribute mask=0x%x specified for\n"
1774 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1775 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1776 new_qp_state);
1777 rc = -EINVAL;
1778 goto err;
1779 }
1780
1781 /* Translate the masks... */
1782 if (attr_mask & IB_QP_STATE) {
1783 SET_FIELD(qp_params.modify_flags,
1784 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1785 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1786 }
1787
1788 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1789 qp_params.sqd_async = true;
1790
1791 if (attr_mask & IB_QP_PKEY_INDEX) {
1792 SET_FIELD(qp_params.modify_flags,
1793 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1794 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1795 rc = -EINVAL;
1796 goto err;
1797 }
1798
1799 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1800 }
1801
1802 if (attr_mask & IB_QP_QKEY)
1803 qp->qkey = attr->qkey;
1804
1805 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1806 SET_FIELD(qp_params.modify_flags,
1807 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1808 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1809 IB_ACCESS_REMOTE_READ;
1810 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1811 IB_ACCESS_REMOTE_WRITE;
1812 qp_params.incoming_atomic_en = attr->qp_access_flags &
1813 IB_ACCESS_REMOTE_ATOMIC;
1814 }
1815
1816 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1817 if (attr_mask & IB_QP_PATH_MTU) {
1818 if (attr->path_mtu < IB_MTU_256 ||
1819 attr->path_mtu > IB_MTU_4096) {
1820 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1821 rc = -EINVAL;
1822 goto err;
1823 }
1824 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1825 ib_mtu_enum_to_int(iboe_get_mtu
1826 (dev->ndev->mtu)));
1827 }
1828
1829 if (!qp->mtu) {
1830 qp->mtu =
1831 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1832 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1833 }
1834
1835 SET_FIELD(qp_params.modify_flags,
1836 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1837
1838 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1839 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1840 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1841
1842 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1843
1844 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1845 if (rc) {
1846 DP_ERR(dev,
1847 "modify qp: problems with GID index %d (rc=%d)\n",
1848 attr->ah_attr.grh.sgid_index, rc);
1849 return rc;
1850 }
1851
1852 rc = qedr_get_dmac(dev, &attr->ah_attr,
1853 qp_params.remote_mac_addr);
1854 if (rc)
1855 return rc;
1856
1857 qp_params.use_local_mac = true;
1858 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1859
1860 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1861 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1862 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1863 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1864 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1865 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1866 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1867 qp_params.remote_mac_addr);
1868
1869
1870 qp_params.mtu = qp->mtu;
1871 qp_params.lb_indication = false;
1872 }
1873
1874 if (!qp_params.mtu) {
1875 /* Stay with current MTU */
1876 if (qp->mtu)
1877 qp_params.mtu = qp->mtu;
1878 else
1879 qp_params.mtu =
1880 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1881 }
1882
1883 if (attr_mask & IB_QP_TIMEOUT) {
1884 SET_FIELD(qp_params.modify_flags,
1885 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1886
1887 qp_params.ack_timeout = attr->timeout;
1888 if (attr->timeout) {
1889 u32 temp;
1890
1891 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1892 /* FW requires [msec] */
1893 qp_params.ack_timeout = temp;
1894 } else {
1895 /* Infinite */
1896 qp_params.ack_timeout = 0;
1897 }
1898 }
1899 if (attr_mask & IB_QP_RETRY_CNT) {
1900 SET_FIELD(qp_params.modify_flags,
1901 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1902 qp_params.retry_cnt = attr->retry_cnt;
1903 }
1904
1905 if (attr_mask & IB_QP_RNR_RETRY) {
1906 SET_FIELD(qp_params.modify_flags,
1907 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1908 qp_params.rnr_retry_cnt = attr->rnr_retry;
1909 }
1910
1911 if (attr_mask & IB_QP_RQ_PSN) {
1912 SET_FIELD(qp_params.modify_flags,
1913 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1914 qp_params.rq_psn = attr->rq_psn;
1915 qp->rq_psn = attr->rq_psn;
1916 }
1917
1918 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1919 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1920 rc = -EINVAL;
1921 DP_ERR(dev,
1922 "unsupported max_rd_atomic=%d, supported=%d\n",
1923 attr->max_rd_atomic,
1924 dev->attr.max_qp_req_rd_atomic_resc);
1925 goto err;
1926 }
1927
1928 SET_FIELD(qp_params.modify_flags,
1929 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1930 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1931 }
1932
1933 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1934 SET_FIELD(qp_params.modify_flags,
1935 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1936 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1937 }
1938
1939 if (attr_mask & IB_QP_SQ_PSN) {
1940 SET_FIELD(qp_params.modify_flags,
1941 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1942 qp_params.sq_psn = attr->sq_psn;
1943 qp->sq_psn = attr->sq_psn;
1944 }
1945
1946 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1947 if (attr->max_dest_rd_atomic >
1948 dev->attr.max_qp_resp_rd_atomic_resc) {
1949 DP_ERR(dev,
1950 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1951 attr->max_dest_rd_atomic,
1952 dev->attr.max_qp_resp_rd_atomic_resc);
1953
1954 rc = -EINVAL;
1955 goto err;
1956 }
1957
1958 SET_FIELD(qp_params.modify_flags,
1959 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1960 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1961 }
1962
1963 if (attr_mask & IB_QP_DEST_QPN) {
1964 SET_FIELD(qp_params.modify_flags,
1965 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1966
1967 qp_params.dest_qp = attr->dest_qp_num;
1968 qp->dest_qp_num = attr->dest_qp_num;
1969 }
1970
1971 if (qp->qp_type != IB_QPT_GSI)
1972 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1973 qp->qed_qp, &qp_params);
1974
1975 if (attr_mask & IB_QP_STATE) {
1976 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
d6ebbf29 1977 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
cecbcddf
RA
1978 qp->state = qp_params.new_state;
1979 }
1980
1981err:
1982 return rc;
1983}
1984
1985static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1986{
1987 int ib_qp_acc_flags = 0;
1988
1989 if (params->incoming_rdma_write_en)
1990 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1991 if (params->incoming_rdma_read_en)
1992 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1993 if (params->incoming_atomic_en)
1994 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1995 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1996 return ib_qp_acc_flags;
1997}
1998
1999int qedr_query_qp(struct ib_qp *ibqp,
2000 struct ib_qp_attr *qp_attr,
2001 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2002{
2003 struct qed_rdma_query_qp_out_params params;
2004 struct qedr_qp *qp = get_qedr_qp(ibqp);
2005 struct qedr_dev *dev = qp->dev;
2006 int rc = 0;
2007
2008 memset(&params, 0, sizeof(params));
2009
2010 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2011 if (rc)
2012 goto err;
2013
2014 memset(qp_attr, 0, sizeof(*qp_attr));
2015 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2016
2017 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2018 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2019 qp_attr->path_mtu = iboe_get_mtu(params.mtu);
2020 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2021 qp_attr->rq_psn = params.rq_psn;
2022 qp_attr->sq_psn = params.sq_psn;
2023 qp_attr->dest_qp_num = params.dest_qp;
2024
2025 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2026
2027 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2028 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2029 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2030 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2031 qp_attr->cap.max_inline_data = qp->max_inline_data;
2032 qp_init_attr->cap = qp_attr->cap;
2033
2034 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2035 sizeof(qp_attr->ah_attr.grh.dgid.raw));
2036
2037 qp_attr->ah_attr.grh.flow_label = params.flow_label;
2038 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2039 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2040 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2041
2042 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2043 qp_attr->ah_attr.port_num = 1;
2044 qp_attr->ah_attr.sl = 0;
2045 qp_attr->timeout = params.timeout;
2046 qp_attr->rnr_retry = params.rnr_retry;
2047 qp_attr->retry_cnt = params.retry_cnt;
2048 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2049 qp_attr->pkey_index = params.pkey_index;
2050 qp_attr->port_num = 1;
2051 qp_attr->ah_attr.src_path_bits = 0;
2052 qp_attr->ah_attr.static_rate = 0;
2053 qp_attr->alt_pkey_index = 0;
2054 qp_attr->alt_port_num = 0;
2055 qp_attr->alt_timeout = 0;
2056 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2057
2058 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2059 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2060 qp_attr->max_rd_atomic = params.max_rd_atomic;
2061 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2062
2063 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2064 qp_attr->cap.max_inline_data);
2065
2066err:
2067 return rc;
2068}
2069
2070int qedr_destroy_qp(struct ib_qp *ibqp)
2071{
2072 struct qedr_qp *qp = get_qedr_qp(ibqp);
2073 struct qedr_dev *dev = qp->dev;
2074 struct ib_qp_attr attr;
2075 int attr_mask = 0;
2076 int rc = 0;
2077
2078 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2079 qp, qp->qp_type);
2080
2081 if (qp->state != QED_ROCE_QP_STATE_RESET && qp->state != QED_ROCE_QP_STATE_ERR &&
2082 qp->state != QED_ROCE_QP_STATE_INIT) {
2083 attr.qp_state = IB_QPS_ERR;
2084 attr_mask |= IB_QP_STATE;
2085
2086 /* Change the QP state to ERROR */
2087 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2088 }
2089
2090 if (qp->qp_type != IB_QPT_GSI) {
2091 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2092 if (rc)
2093 return rc;
04886779
RA
2094 } else {
2095 qedr_destroy_gsi_qp(dev);
cecbcddf
RA
2096 }
2097
2098 if (ibqp->uobject && ibqp->uobject->context) {
2099 qedr_cleanup_user_sq(dev, qp);
2100 qedr_cleanup_user_rq(dev, qp);
2101 } else {
2102 qedr_cleanup_kernel_sq(dev, qp);
2103 qedr_cleanup_kernel_rq(dev, qp);
2104 }
2105
2106 kfree(qp);
2107
2108 return rc;
2109}
e0290cce 2110
477864c8
MS
2111struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2112 struct ib_udata *udata)
04886779
RA
2113{
2114 struct qedr_ah *ah;
2115
2116 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2117 if (!ah)
2118 return ERR_PTR(-ENOMEM);
2119
2120 ah->attr = *attr;
2121
2122 return &ah->ibah;
2123}
2124
2125int qedr_destroy_ah(struct ib_ah *ibah)
2126{
2127 struct qedr_ah *ah = get_qedr_ah(ibah);
2128
2129 kfree(ah);
2130 return 0;
2131}
2132
e0290cce
RA
2133static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2134{
2135 struct qedr_pbl *pbl, *tmp;
2136
2137 if (info->pbl_table)
2138 list_add_tail(&info->pbl_table->list_entry,
2139 &info->free_pbl_list);
2140
2141 if (!list_empty(&info->inuse_pbl_list))
2142 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2143
2144 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2145 list_del(&pbl->list_entry);
2146 qedr_free_pbl(dev, &info->pbl_info, pbl);
2147 }
2148}
2149
2150static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2151 size_t page_list_len, bool two_layered)
2152{
2153 struct qedr_pbl *tmp;
2154 int rc;
2155
2156 INIT_LIST_HEAD(&info->free_pbl_list);
2157 INIT_LIST_HEAD(&info->inuse_pbl_list);
2158
2159 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2160 page_list_len, two_layered);
2161 if (rc)
2162 goto done;
2163
2164 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2165 if (!info->pbl_table) {
2166 rc = -ENOMEM;
2167 goto done;
2168 }
2169
2170 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2171 &info->pbl_table->pa);
2172
2173 /* In the usual case we use 2 PBLs, so we add one to the free
2174 * list and allocate another one for immediate use.
2175 */
2176 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2177 if (!tmp) {
2178 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2179 goto done;
2180 }
2181
2182 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2183
2184 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2185
2186done:
2187 if (rc)
2188 free_mr_info(dev, info);
2189
2190 return rc;
2191}
2192
2193struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2194 u64 usr_addr, int acc, struct ib_udata *udata)
2195{
2196 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2197 struct qedr_mr *mr;
2198 struct qedr_pd *pd;
2199 int rc = -ENOMEM;
2200
2201 pd = get_qedr_pd(ibpd);
2202 DP_DEBUG(dev, QEDR_MSG_MR,
2203 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2204 pd->pd_id, start, len, usr_addr, acc);
2205
2206 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2207 return ERR_PTR(-EINVAL);
2208
2209 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2210 if (!mr)
2211 return ERR_PTR(rc);
2212
2213 mr->type = QEDR_MR_USER;
2214
2215 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2216 if (IS_ERR(mr->umem)) {
2217 rc = -EFAULT;
2218 goto err0;
2219 }
2220
2221 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2222 if (rc)
2223 goto err1;
2224
2225 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2226 &mr->info.pbl_info);
2227
2228 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2229 if (rc) {
2230 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2231 goto err1;
2232 }
2233
2234 /* Index only, 18 bit long, lkey = itid << 8 | key */
2235 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2236 mr->hw_mr.key = 0;
2237 mr->hw_mr.pd = pd->pd_id;
2238 mr->hw_mr.local_read = 1;
2239 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2240 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2241 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2242 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2243 mr->hw_mr.mw_bind = false;
2244 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2245 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2246 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2247 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2248 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2249 mr->hw_mr.length = len;
2250 mr->hw_mr.vaddr = usr_addr;
2251 mr->hw_mr.zbva = false;
2252 mr->hw_mr.phy_mr = false;
2253 mr->hw_mr.dma_mr = false;
2254
2255 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2256 if (rc) {
2257 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2258 goto err2;
2259 }
2260
2261 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2262 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2263 mr->hw_mr.remote_atomic)
2264 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2265
2266 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2267 mr->ibmr.lkey);
2268 return &mr->ibmr;
2269
2270err2:
2271 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2272err1:
2273 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2274err0:
2275 kfree(mr);
2276 return ERR_PTR(rc);
2277}
2278
2279int qedr_dereg_mr(struct ib_mr *ib_mr)
2280{
2281 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2282 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2283 int rc = 0;
2284
2285 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2286 if (rc)
2287 return rc;
2288
2289 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2290
2291 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2292 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2293
2294 /* it could be user registered memory. */
2295 if (mr->umem)
2296 ib_umem_release(mr->umem);
2297
2298 kfree(mr);
2299
2300 return rc;
2301}
2302
2303struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2304{
2305 struct qedr_pd *pd = get_qedr_pd(ibpd);
2306 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2307 struct qedr_mr *mr;
2308 int rc = -ENOMEM;
2309
2310 DP_DEBUG(dev, QEDR_MSG_MR,
2311 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2312 max_page_list_len);
2313
2314 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2315 if (!mr)
2316 return ERR_PTR(rc);
2317
2318 mr->dev = dev;
2319 mr->type = QEDR_MR_FRMR;
2320
2321 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2322 if (rc)
2323 goto err0;
2324
2325 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2326 if (rc) {
2327 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2328 goto err0;
2329 }
2330
2331 /* Index only, 18 bit long, lkey = itid << 8 | key */
2332 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2333 mr->hw_mr.key = 0;
2334 mr->hw_mr.pd = pd->pd_id;
2335 mr->hw_mr.local_read = 1;
2336 mr->hw_mr.local_write = 0;
2337 mr->hw_mr.remote_read = 0;
2338 mr->hw_mr.remote_write = 0;
2339 mr->hw_mr.remote_atomic = 0;
2340 mr->hw_mr.mw_bind = false;
2341 mr->hw_mr.pbl_ptr = 0;
2342 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2343 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2344 mr->hw_mr.fbo = 0;
2345 mr->hw_mr.length = 0;
2346 mr->hw_mr.vaddr = 0;
2347 mr->hw_mr.zbva = false;
2348 mr->hw_mr.phy_mr = true;
2349 mr->hw_mr.dma_mr = false;
2350
2351 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2352 if (rc) {
2353 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2354 goto err1;
2355 }
2356
2357 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2358 mr->ibmr.rkey = mr->ibmr.lkey;
2359
2360 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2361 return mr;
2362
2363err1:
2364 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2365err0:
2366 kfree(mr);
2367 return ERR_PTR(rc);
2368}
2369
2370struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2371 enum ib_mr_type mr_type, u32 max_num_sg)
2372{
2373 struct qedr_dev *dev;
2374 struct qedr_mr *mr;
2375
2376 if (mr_type != IB_MR_TYPE_MEM_REG)
2377 return ERR_PTR(-EINVAL);
2378
2379 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2380
2381 if (IS_ERR(mr))
2382 return ERR_PTR(-EINVAL);
2383
2384 dev = mr->dev;
2385
2386 return &mr->ibmr;
2387}
2388
2389static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2390{
2391 struct qedr_mr *mr = get_qedr_mr(ibmr);
2392 struct qedr_pbl *pbl_table;
2393 struct regpair *pbe;
2394 u32 pbes_in_page;
2395
2396 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2397 DP_ERR(mr->dev, "qedr_set_page failed, npages=%d\n", mr->npages);
2398 return -ENOMEM;
2399 }
2400
2401 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2402 mr->npages, addr);
2403
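/* Each PBL page holds pbl_size / sizeof(u64) page-base entries (PBEs);
 * the quotient below selects the PBL table page and the remainder
 * selects the PBE within that page.
 */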
2404 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2405 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2406 pbe = (struct regpair *)pbl_table->va;
2407 pbe += mr->npages % pbes_in_page;
2408 pbe->lo = cpu_to_le32((u32)addr);
2409 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2410
2411 mr->npages++;
2412
2413 return 0;
2414}
2415
2416static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2417{
2418 int work = info->completed - info->completed_handled - 1;
2419
2420 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2421 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2422 struct qedr_pbl *pbl;
2423
2424 /* Free all the page lists that can be freed (all the ones that were
2425 * invalidated), under the assumption that if an FMR completed
2426 * successfully, then any invalidate operation posted before it has
2427 * also completed.
2428 */
2429 pbl = list_first_entry(&info->inuse_pbl_list,
2430 struct qedr_pbl, list_entry);
aafec388 2431 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
e0290cce
RA
2432 info->completed_handled++;
2433 }
2434}
2435
2436int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2437 int sg_nents, unsigned int *sg_offset)
2438{
2439 struct qedr_mr *mr = get_qedr_mr(ibmr);
2440
2441 mr->npages = 0;
2442
2443 handle_completed_mrs(mr->dev, &mr->info);
2444 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2445}
2446
2447struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2448{
2449 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2450 struct qedr_pd *pd = get_qedr_pd(ibpd);
2451 struct qedr_mr *mr;
2452 int rc;
2453
2454 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2455 if (!mr)
2456 return ERR_PTR(-ENOMEM);
2457
2458 mr->type = QEDR_MR_DMA;
2459
2460 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2461 if (rc) {
2462 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2463 goto err1;
2464 }
2465
2466 /* index only, 18 bit long, lkey = itid << 8 | key */
2467 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2468 mr->hw_mr.pd = pd->pd_id;
2469 mr->hw_mr.local_read = 1;
2470 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2471 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2472 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2473 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2474 mr->hw_mr.dma_mr = true;
2475
2476 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2477 if (rc) {
2478 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2479 goto err2;
2480 }
2481
2482 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2483 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2484 mr->hw_mr.remote_atomic)
2485 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2486
2487 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2488 return &mr->ibmr;
2489
2490err2:
2491 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2492err1:
2493 kfree(mr);
2494 return ERR_PTR(rc);
2495}
afa0e13b
RA
2496
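/* The work queue is treated as full when advancing the producer would make
 * it equal to the consumer, i.e. one slot is always left unused.
 */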
2497static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2498{
2499 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2500}
2501
2502static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2503{
2504 int i, len = 0;
2505
2506 for (i = 0; i < num_sge; i++)
2507 len += sg_list[i].length;
2508
2509 return len;
2510}
2511
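/* The cpu_to_be64(cpu_to_le64()) combination below amounts to an
 * unconditional byte swap of each 64-bit word on any host endianness.
 */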
2512static void swap_wqe_data64(u64 *p)
2513{
2514 int i;
2515
2516 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2517 *p = cpu_to_be64(cpu_to_le64(*p));
2518}
2519
2520static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2521 struct qedr_qp *qp, u8 *wqe_size,
2522 struct ib_send_wr *wr,
2523 struct ib_send_wr **bad_wr, u8 *bits,
2524 u8 bit)
2525{
2526 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2527 char *seg_prt, *wqe;
2528 int i, seg_siz;
2529
2530 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2531 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2532 *bad_wr = wr;
2533 return 0;
2534 }
2535
2536 if (!data_size)
2537 return data_size;
2538
2539 *bits |= bit;
2540
2541 seg_prt = NULL;
2542 wqe = NULL;
2543 seg_siz = 0;
2544
2545 /* Copy data inline */
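/* The inline payload is scattered over successive SQ chain elements:
 * a new element is produced whenever the current segment is exhausted,
 * and each fully written segment is byte-swapped via swap_wqe_data64().
 */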
2546 for (i = 0; i < wr->num_sge; i++) {
2547 u32 len = wr->sg_list[i].length;
2548 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2549
2550 while (len > 0) {
2551 u32 cur;
2552
2553 /* New segment required */
2554 if (!seg_siz) {
2555 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2556 seg_prt = wqe;
2557 seg_siz = sizeof(struct rdma_sq_common_wqe);
2558 (*wqe_size)++;
2559 }
2560
2561 /* Calculate currently allowed length */
2562 cur = min_t(u32, len, seg_siz);
2563 memcpy(seg_prt, src, cur);
2564
2565 /* Update segment variables */
2566 seg_prt += cur;
2567 seg_siz -= cur;
2568
2569 /* Update sge variables */
2570 src += cur;
2571 len -= cur;
2572
2573 /* Swap fully-completed segments */
2574 if (!seg_siz)
2575 swap_wqe_data64((u64 *)wqe);
2576 }
2577 }
2578
2579 /* swap last not completed segment */
2580 if (seg_siz)
2581 swap_wqe_data64((u64 *)wqe);
2582
2583 return data_size;
2584}
2585
2586#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2587 do { \
2588 DMA_REGPAIR_LE(sge->addr, vaddr); \
2589 (sge)->length = cpu_to_le32(vlength); \
2590 (sge)->flags = cpu_to_le32(vflags); \
2591 } while (0)
2592
2593#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2594 do { \
2595 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2596 (hdr)->num_sges = num_sge; \
2597 } while (0)
2598
2599#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2600 do { \
2601 DMA_REGPAIR_LE(sge->addr, vaddr); \
2602 (sge)->length = cpu_to_le32(vlength); \
2603 (sge)->l_key = cpu_to_le32(vlkey); \
2604 } while (0)
2605
2606static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2607 struct ib_send_wr *wr)
2608{
2609 u32 data_size = 0;
2610 int i;
2611
2612 for (i = 0; i < wr->num_sge; i++) {
2613 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2614
2615 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2616 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2617 sge->length = cpu_to_le32(wr->sg_list[i].length);
2618 data_size += wr->sg_list[i].length;
2619 }
2620
2621 if (wqe_size)
2622 *wqe_size += wr->num_sge;
2623
2624 return data_size;
2625}
2626
2627static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2628 struct qedr_qp *qp,
2629 struct rdma_sq_rdma_wqe_1st *rwqe,
2630 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2631 struct ib_send_wr *wr,
2632 struct ib_send_wr **bad_wr)
2633{
2634 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2635 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2636
2637 if (wr->send_flags & IB_SEND_INLINE) {
2638 u8 flags = 0;
2639
2640 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2641 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2642 bad_wr, &rwqe->flags, flags);
2643 }
2644
2645 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2646}
2647
2648static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2649 struct qedr_qp *qp,
2650 struct rdma_sq_send_wqe_1st *swqe,
2651 struct rdma_sq_send_wqe_2st *swqe2,
2652 struct ib_send_wr *wr,
2653 struct ib_send_wr **bad_wr)
2654{
2655 memset(swqe2, 0, sizeof(*swqe2));
2656 if (wr->send_flags & IB_SEND_INLINE) {
2657 u8 flags = 0;
2658
2659 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2660 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2661 bad_wr, &swqe->flags, flags);
2662 }
2663
2664 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2665}
2666
2667static int qedr_prepare_reg(struct qedr_qp *qp,
2668 struct rdma_sq_fmr_wqe_1st *fwqe1,
2669 struct ib_reg_wr *wr)
2670{
2671 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2672 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2673
2674 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2675 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2676 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2677 fwqe1->l_key = wr->key;
2678
2679 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2680 !!(wr->access & IB_ACCESS_REMOTE_READ));
2681 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2682 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2683 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2684 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2685 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2686 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2687 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2688 fwqe2->fmr_ctrl = 0;
2689
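/* The FMR page size is passed as log2(page_size) - 12, i.e. relative to 4K. */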
2690 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2691 ilog2(mr->ibmr.page_size) - 12);
2692
2693 fwqe2->length_hi = 0;
2694 fwqe2->length_lo = mr->ibmr.length;
2695 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2696 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2697
2698 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2699
2700 return 0;
2701}
2702
2703enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2704{
2705 switch (opcode) {
2706 case IB_WR_RDMA_WRITE:
2707 case IB_WR_RDMA_WRITE_WITH_IMM:
2708 return IB_WC_RDMA_WRITE;
2709 case IB_WR_SEND_WITH_IMM:
2710 case IB_WR_SEND:
2711 case IB_WR_SEND_WITH_INV:
2712 return IB_WC_SEND;
2713 case IB_WR_RDMA_READ:
2714 return IB_WC_RDMA_READ;
2715 case IB_WR_ATOMIC_CMP_AND_SWP:
2716 return IB_WC_COMP_SWAP;
2717 case IB_WR_ATOMIC_FETCH_AND_ADD:
2718 return IB_WC_FETCH_ADD;
2719 case IB_WR_REG_MR:
2720 return IB_WC_REG_MR;
2721 case IB_WR_LOCAL_INV:
2722 return IB_WC_LOCAL_INV;
2723 default:
2724 return IB_WC_SEND;
2725 }
2726}
2727
2728inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2729{
2730 int wq_is_full, err_wr, pbl_is_full;
2731 struct qedr_dev *dev = qp->dev;
2732
2733 /* prevent SQ overflow and/or processing of a bad WR */
2734 err_wr = wr->num_sge > qp->sq.max_sges;
2735 wq_is_full = qedr_wq_is_full(&qp->sq);
2736 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2737 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2738 if (wq_is_full || err_wr || pbl_is_full) {
2739 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2740 DP_ERR(dev,
2741 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2742 qp);
2743 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2744 }
2745
2746 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2747 DP_ERR(dev,
2748 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2749 qp);
2750 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2751 }
2752
2753 if (pbl_is_full &&
2754 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2755 DP_ERR(dev,
2756 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2757 qp);
2758 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2759 }
2760 return false;
2761 }
2762 return true;
2763}
2764
2765int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2766 struct ib_send_wr **bad_wr)
2767{
2768 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2769 struct qedr_qp *qp = get_qedr_qp(ibqp);
2770 struct rdma_sq_atomic_wqe_1st *awqe1;
2771 struct rdma_sq_atomic_wqe_2nd *awqe2;
2772 struct rdma_sq_atomic_wqe_3rd *awqe3;
2773 struct rdma_sq_send_wqe_2st *swqe2;
2774 struct rdma_sq_local_inv_wqe *iwqe;
2775 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2776 struct rdma_sq_send_wqe_1st *swqe;
2777 struct rdma_sq_rdma_wqe_1st *rwqe;
2778 struct rdma_sq_fmr_wqe_1st *fwqe1;
2779 struct rdma_sq_common_wqe *wqe;
2780 u32 length;
2781 int rc = 0;
2782 bool comp;
2783
2784 if (!qedr_can_post_send(qp, wr)) {
2785 *bad_wr = wr;
2786 return -ENOMEM;
2787 }
2788
2789 wqe = qed_chain_produce(&qp->sq.pbl);
2790 qp->wqe_wr_id[qp->sq.prod].signaled =
2791 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2792
2793 wqe->flags = 0;
2794 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2795 !!(wr->send_flags & IB_SEND_SOLICITED));
2796 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2797 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2798 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2799 !!(wr->send_flags & IB_SEND_FENCE));
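/* The previous WQE size is stored in the new WQE so it can be restored
 * if this WR turns out to be bad (see the *bad_wr rollback below).
 */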
2800 wqe->prev_wqe_size = qp->prev_wqe_size;
2801
2802 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2803
2804 switch (wr->opcode) {
2805 case IB_WR_SEND_WITH_IMM:
2806 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2807 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2808 swqe->wqe_size = 2;
2809 swqe2 = qed_chain_produce(&qp->sq.pbl);
2810
2811 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2812 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2813 wr, bad_wr);
2814 swqe->length = cpu_to_le32(length);
2815 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2816 qp->prev_wqe_size = swqe->wqe_size;
2817 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2818 break;
2819 case IB_WR_SEND:
2820 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2821 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2822
2823 swqe->wqe_size = 2;
2824 swqe2 = qed_chain_produce(&qp->sq.pbl);
2825 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2826 wr, bad_wr);
2827 swqe->length = cpu_to_le32(length);
2828 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2829 qp->prev_wqe_size = swqe->wqe_size;
2830 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2831 break;
2832 case IB_WR_SEND_WITH_INV:
2833 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2834 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2835 swqe2 = qed_chain_produce(&qp->sq.pbl);
2836 swqe->wqe_size = 2;
2837 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2838 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2839 wr, bad_wr);
2840 swqe->length = cpu_to_le32(length);
2841 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2842 qp->prev_wqe_size = swqe->wqe_size;
2843 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2844 break;
2845
2846 case IB_WR_RDMA_WRITE_WITH_IMM:
2847 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2848 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2849
2850 rwqe->wqe_size = 2;
2851 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2852 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2853 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2854 wr, bad_wr);
2855 rwqe->length = cpu_to_le32(length);
2856 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2857 qp->prev_wqe_size = rwqe->wqe_size;
2858 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2859 break;
2860 case IB_WR_RDMA_WRITE:
2861 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2862 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2863
2864 rwqe->wqe_size = 2;
2865 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2866 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2867 wr, bad_wr);
2868 rwqe->length = cpu_to_le32(length);
2869 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2870 qp->prev_wqe_size = rwqe->wqe_size;
2871 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2872 break;
2873 case IB_WR_RDMA_READ_WITH_INV:
2874 DP_ERR(dev,
2875 "RDMA READ WITH INVALIDATE not supported\n");
2876 *bad_wr = wr;
2877 rc = -EINVAL;
2878 break;
2879
2880 case IB_WR_RDMA_READ:
2881 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2882 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2883
2884 rwqe->wqe_size = 2;
2885 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2886 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2887 wr, bad_wr);
2888 rwqe->length = cpu_to_le32(length);
2889 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2890 qp->prev_wqe_size = rwqe->wqe_size;
2891 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2892 break;
2893
2894 case IB_WR_ATOMIC_CMP_AND_SWP:
2895 case IB_WR_ATOMIC_FETCH_AND_ADD:
2896 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2897 awqe1->wqe_size = 4;
2898
2899 awqe2 = qed_chain_produce(&qp->sq.pbl);
2900 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2901 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2902
2903 awqe3 = qed_chain_produce(&qp->sq.pbl);
2904
2905 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2906 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2907 DMA_REGPAIR_LE(awqe3->swap_data,
2908 atomic_wr(wr)->compare_add);
2909 } else {
2910 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2911 DMA_REGPAIR_LE(awqe3->swap_data,
2912 atomic_wr(wr)->swap);
2913 DMA_REGPAIR_LE(awqe3->cmp_data,
2914 atomic_wr(wr)->compare_add);
2915 }
2916
2917 qedr_prepare_sq_sges(qp, NULL, wr);
2918
2919 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2920 qp->prev_wqe_size = awqe1->wqe_size;
2921 break;
2922
2923 case IB_WR_LOCAL_INV:
2924 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2925 iwqe->wqe_size = 1;
2926
2927 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2928 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2929 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2930 qp->prev_wqe_size = iwqe->wqe_size;
2931 break;
2932 case IB_WR_REG_MR:
2933 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2934 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2935 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2936 fwqe1->wqe_size = 2;
2937
2938 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2939 if (rc) {
2940 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2941 *bad_wr = wr;
2942 break;
2943 }
2944
2945 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2946 qp->prev_wqe_size = fwqe1->wqe_size;
2947 break;
2948 default:
2949 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2950 rc = -EINVAL;
2951 *bad_wr = wr;
2952 break;
2953 }
2954
2955 if (*bad_wr) {
2956 u16 value;
2957
2958 /* Restore prod to its position before
2959 * this WR was processed
2960 */
2961 value = le16_to_cpu(qp->sq.db_data.data.value);
2962 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2963
2964 /* Restore prev_wqe_size */
2965 qp->prev_wqe_size = wqe->prev_wqe_size;
2966 rc = -EINVAL;
2967 DP_ERR(dev, "POST SEND FAILED\n");
2968 }
2969
2970 return rc;
2971}
2972
2973int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2974 struct ib_send_wr **bad_wr)
2975{
2976 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2977 struct qedr_qp *qp = get_qedr_qp(ibqp);
2978 unsigned long flags;
2979 int rc = 0;
2980
2981 *bad_wr = NULL;
2982
04886779
RA
2983 if (qp->qp_type == IB_QPT_GSI)
2984 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2985
afa0e13b
RA
2986 spin_lock_irqsave(&qp->q_lock, flags);
2987
2988 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
2989 (qp->state == QED_ROCE_QP_STATE_ERR)) {
2990 spin_unlock_irqrestore(&qp->q_lock, flags);
2991 *bad_wr = wr;
2992 DP_DEBUG(dev, QEDR_MSG_CQ,
2993 "QP in wrong state! QP icid=0x%x state %d\n",
2994 qp->icid, qp->state);
2995 return -EINVAL;
2996 }
2997
afa0e13b
RA
2998 while (wr) {
2999 rc = __qedr_post_send(ibqp, wr, bad_wr);
3000 if (rc)
3001 break;
3002
3003 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3004
3005 qedr_inc_sw_prod(&qp->sq);
3006
3007 qp->sq.db_data.data.value++;
3008
3009 wr = wr->next;
3010 }
3011
3012 /* Trigger doorbell
3013 * If there was a failure in the first WR then it will be triggered in
3014 * vain. However, this is not harmful (as long as the producer value is
3015 * unchanged). For performance reasons we avoid checking for this
3016 * redundant doorbell.
3017 */
3018 wmb();
3019 writel(qp->sq.db_data.raw, qp->sq.db);
3020
3021 /* Make sure write sticks */
3022 mmiowb();
3023
3024 spin_unlock_irqrestore(&qp->q_lock, flags);
3025
3026 return rc;
3027}
3028
3029int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3030 struct ib_recv_wr **bad_wr)
3031{
3032 struct qedr_qp *qp = get_qedr_qp(ibqp);
3033 struct qedr_dev *dev = qp->dev;
3034 unsigned long flags;
3035 int status = 0;
3036
04886779
RA
3037 if (qp->qp_type == IB_QPT_GSI)
3038 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3039
afa0e13b
RA
3040 spin_lock_irqsave(&qp->q_lock, flags);
3041
3042 if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
3043 (qp->state == QED_ROCE_QP_STATE_ERR)) {
3044 spin_unlock_irqrestore(&qp->q_lock, flags);
3045 *bad_wr = wr;
3046 return -EINVAL;
3047 }
3048
3049 while (wr) {
3050 int i;
3051
3052 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3053 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3054 wr->num_sge > qp->rq.max_sges) {
3055 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3056 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3057 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3058 qp->rq.max_sges);
3059 status = -ENOMEM;
3060 *bad_wr = wr;
3061 break;
3062 }
3063 for (i = 0; i < wr->num_sge; i++) {
3064 u32 flags = 0;
3065 struct rdma_rq_sge *rqe =
3066 qed_chain_produce(&qp->rq.pbl);
3067
3068 /* First one must include the number
3069 * of SGE in the list
3070 */
3071 if (!i)
3072 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3073 wr->num_sge);
3074
3075 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3076 wr->sg_list[i].lkey);
3077
3078 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3079 wr->sg_list[i].length, flags);
3080 }
3081
3082 /* Special case of no SGEs. FW requires between 1 and 4 SGEs, so in
3083 * this case we post a single SGE with length zero. This is needed
3084 * because an RDMA write with immediate consumes an RQE.
3085 */
3086 if (!wr->num_sge) {
3087 u32 flags = 0;
3088 struct rdma_rq_sge *rqe =
3089 qed_chain_produce(&qp->rq.pbl);
3090
3091 /* First one must include the number
3092 * of SGE in the list
3093 */
3094 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3095 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3096
3097 RQ_SGE_SET(rqe, 0, 0, flags);
3098 i = 1;
3099 }
3100
3101 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3102 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3103
3104 qedr_inc_sw_prod(&qp->rq);
3105
3106 /* Flush all the writes before signalling doorbell */
3107 wmb();
3108
3109 qp->rq.db_data.data.value++;
3110
3111 writel(qp->rq.db_data.raw, qp->rq.db);
3112
3113 /* Make sure write sticks */
3114 mmiowb();
3115
3116 wr = wr->next;
3117 }
3118
3119 spin_unlock_irqrestore(&qp->q_lock, flags);
3120
3121 return status;
3122}
3123
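/* A CQE is ready for SW processing when its toggle bit matches the CQ's
 * current pbl_toggle value.
 */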
3124static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3125{
3126 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3127
3128 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3129 cq->pbl_toggle;
3130}
3131
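/* The qp_handle hi/lo pair in the CQE carries the driver's qedr_qp pointer. */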
3132static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3133{
3134 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3135 struct qedr_qp *qp;
3136
3137 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3138 resp_cqe->qp_handle.lo,
3139 u64);
3140 return qp;
3141}
3142
3143static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3144{
3145 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3146
3147 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3148}
3149
3150/* Return latest CQE (needs processing) */
3151static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3152{
3153 return cq->latest_cqe;
3154}
3155
3156 /* For FMRs we need to increase the completed counter used by the FMR
3157 * algorithm that determines whether a PBL can be freed or not.
3158 * This must be done whether or not the work request was signaled. For
3159 * this purpose we call this function from the condition that checks if a WR
3160 * should be skipped, to make sure we don't miss it (possibly this FMR
3161 * operation was not signaled).
3162 */
3163static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3164{
3165 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3166 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3167}
3168
3169static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3170 struct qedr_cq *cq, int num_entries,
3171 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3172 int force)
3173{
3174 u16 cnt = 0;
3175
3176 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3177 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3178 qedr_chk_if_fmr(qp);
3179 /* skip WC */
3180 goto next_cqe;
3181 }
3182
3183 /* fill WC */
3184 wc->status = status;
3185 wc->wc_flags = 0;
3186 wc->src_qp = qp->id;
3187 wc->qp = &qp->ibqp;
3188
3189 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3190 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3191
3192 switch (wc->opcode) {
3193 case IB_WC_RDMA_WRITE:
3194 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3195 break;
3196 case IB_WC_COMP_SWAP:
3197 case IB_WC_FETCH_ADD:
3198 wc->byte_len = 8;
3199 break;
3200 case IB_WC_REG_MR:
3201 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3202 break;
3203 default:
3204 break;
3205 }
3206
3207 num_entries--;
3208 wc++;
3209 cnt++;
3210next_cqe:
3211 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3212 qed_chain_consume(&qp->sq.pbl);
3213 qedr_inc_sw_cons(&qp->sq);
3214 }
3215
3216 return cnt;
3217}
3218
3219static int qedr_poll_cq_req(struct qedr_dev *dev,
3220 struct qedr_qp *qp, struct qedr_cq *cq,
3221 int num_entries, struct ib_wc *wc,
3222 struct rdma_cqe_requester *req)
3223{
3224 int cnt = 0;
3225
3226 switch (req->status) {
3227 case RDMA_CQE_REQ_STS_OK:
3228 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3229 IB_WC_SUCCESS, 0);
3230 break;
3231 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3232 DP_ERR(dev,
3233 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3234 cq->icid, qp->icid);
3235 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3236 IB_WC_WR_FLUSH_ERR, 0);
3237 break;
3238 default:
3239 /* process all WQEs before the failed one as successful */
3240 qp->state = QED_ROCE_QP_STATE_ERR;
3241 cnt = process_req(dev, qp, cq, num_entries, wc,
3242 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3243 wc += cnt;
3244 /* if we have extra WC fill it with actual error info */
3245 if (cnt < num_entries) {
3246 enum ib_wc_status wc_status;
3247
3248 switch (req->status) {
3249 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3250 DP_ERR(dev,
3251 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3252 cq->icid, qp->icid);
3253 wc_status = IB_WC_BAD_RESP_ERR;
3254 break;
3255 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3256 DP_ERR(dev,
3257 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3258 cq->icid, qp->icid);
3259 wc_status = IB_WC_LOC_LEN_ERR;
3260 break;
3261 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3262 DP_ERR(dev,
3263 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3264 cq->icid, qp->icid);
3265 wc_status = IB_WC_LOC_QP_OP_ERR;
3266 break;
3267 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3268 DP_ERR(dev,
3269 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3270 cq->icid, qp->icid);
3271 wc_status = IB_WC_LOC_PROT_ERR;
3272 break;
3273 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3274 DP_ERR(dev,
3275 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3276 cq->icid, qp->icid);
3277 wc_status = IB_WC_MW_BIND_ERR;
3278 break;
3279 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3280 DP_ERR(dev,
3281 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3282 cq->icid, qp->icid);
3283 wc_status = IB_WC_REM_INV_REQ_ERR;
3284 break;
3285 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3286 DP_ERR(dev,
3287 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3288 cq->icid, qp->icid);
3289 wc_status = IB_WC_REM_ACCESS_ERR;
3290 break;
3291 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3292 DP_ERR(dev,
3293 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3294 cq->icid, qp->icid);
3295 wc_status = IB_WC_REM_OP_ERR;
3296 break;
3297 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3298 DP_ERR(dev,
3299 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3300 cq->icid, qp->icid);
3301 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3302 break;
3303 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3304 DP_ERR(dev,
3305 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3306 cq->icid, qp->icid);
3307 wc_status = IB_WC_RETRY_EXC_ERR;
3308 break;
3309 default:
3310 DP_ERR(dev,
3311 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3312 cq->icid, qp->icid);
3313 wc_status = IB_WC_GENERAL_ERR;
3314 }
3315 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3316 wc_status, 1);
3317 }
3318 }
3319
3320 return cnt;
3321}
3322
3323static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3324 struct qedr_cq *cq, struct ib_wc *wc,
3325 struct rdma_cqe_responder *resp, u64 wr_id)
3326{
3327 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3328 u8 flags;
3329
3330 wc->opcode = IB_WC_RECV;
3331 wc->wc_flags = 0;
3332
3333 switch (resp->status) {
3334 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3335 wc_status = IB_WC_LOC_ACCESS_ERR;
3336 break;
3337 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3338 wc_status = IB_WC_LOC_LEN_ERR;
3339 break;
3340 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3341 wc_status = IB_WC_LOC_QP_OP_ERR;
3342 break;
3343 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3344 wc_status = IB_WC_LOC_PROT_ERR;
3345 break;
3346 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3347 wc_status = IB_WC_MW_BIND_ERR;
3348 break;
3349 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3350 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3351 break;
3352 case RDMA_CQE_RESP_STS_OK:
3353 wc_status = IB_WC_SUCCESS;
3354 wc->byte_len = le32_to_cpu(resp->length);
3355
3356 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3357
3358 if (flags == QEDR_RESP_RDMA_IMM)
3359 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3360
3361 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3362 wc->ex.imm_data =
3363 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3364 wc->wc_flags |= IB_WC_WITH_IMM;
3365 }
3366 break;
3367 default:
3368 wc->status = IB_WC_GENERAL_ERR;
3369 DP_ERR(dev, "Invalid CQE status detected\n");
3370 }
3371
3372 /* fill WC */
3373 wc->status = wc_status;
3374 wc->src_qp = qp->id;
3375 wc->qp = &qp->ibqp;
3376 wc->wr_id = wr_id;
3377}
3378
3379static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3380 struct qedr_cq *cq, struct ib_wc *wc,
3381 struct rdma_cqe_responder *resp)
3382{
3383 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3384
3385 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3386
3387 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3388 qed_chain_consume(&qp->rq.pbl);
3389 qedr_inc_sw_cons(&qp->rq);
3390
3391 return 1;
3392}
3393
3394static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3395 int num_entries, struct ib_wc *wc, u16 hw_cons)
3396{
3397 u16 cnt = 0;
3398
3399 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3400 /* fill WC */
3401 wc->status = IB_WC_WR_FLUSH_ERR;
3402 wc->wc_flags = 0;
3403 wc->src_qp = qp->id;
3404 wc->byte_len = 0;
3405 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3406 wc->qp = &qp->ibqp;
3407 num_entries--;
3408 wc++;
3409 cnt++;
3410 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3411 qed_chain_consume(&qp->rq.pbl);
3412 qedr_inc_sw_cons(&qp->rq);
3413 }
3414
3415 return cnt;
3416}
3417
3418static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3419 struct rdma_cqe_responder *resp, int *update)
3420{
3421 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3422 consume_cqe(cq);
3423 *update |= 1;
3424 }
3425}
3426
3427static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3428 struct qedr_cq *cq, int num_entries,
3429 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3430 int *update)
3431{
3432 int cnt;
3433
3434 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3435 cnt = process_resp_flush(qp, cq, num_entries, wc,
3436 resp->rq_cons);
3437 try_consume_resp_cqe(cq, qp, resp, update);
3438 } else {
3439 cnt = process_resp_one(dev, qp, cq, wc, resp);
3440 consume_cqe(cq);
3441 *update |= 1;
3442 }
3443
3444 return cnt;
3445}
3446
3447static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3448 struct rdma_cqe_requester *req, int *update)
3449{
3450 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3451 consume_cqe(cq);
3452 *update |= 1;
3453 }
3454}
3455
3456int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3457{
3458 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3459 struct qedr_cq *cq = get_qedr_cq(ibcq);
3460 union rdma_cqe *cqe = cq->latest_cqe;
3461 u32 old_cons, new_cons;
3462 unsigned long flags;
3463 int update = 0;
3464 int done = 0;
3465
04886779
RA
3466 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3467 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3468
afa0e13b
RA
3469 spin_lock_irqsave(&cq->cq_lock, flags);
3470 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3471 while (num_entries && is_valid_cqe(cq, cqe)) {
3472 struct qedr_qp *qp;
3473 int cnt = 0;
3474
3475 /* prevent speculative reads of any field of CQE */
3476 rmb();
3477
3478 qp = cqe_get_qp(cqe);
3479 if (!qp) {
3480 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3481 break;
3482 }
3483
3484 wc->qp = &qp->ibqp;
3485
3486 switch (cqe_get_type(cqe)) {
3487 case RDMA_CQE_TYPE_REQUESTER:
3488 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3489 &cqe->req);
3490 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3491 break;
3492 case RDMA_CQE_TYPE_RESPONDER_RQ:
3493 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3494 &cqe->resp, &update);
3495 break;
3496 case RDMA_CQE_TYPE_INVALID:
3497 default:
3498 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3499 cqe_get_type(cqe));
3500 }
3501 num_entries -= cnt;
3502 wc += cnt;
3503 done += cnt;
3504
3505 cqe = get_cqe(cq);
3506 }
3507 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3508
3509 cq->cq_cons += new_cons - old_cons;
3510
3511 if (update)
3512 /* The doorbell notifies about the latest VALID entry,
3513 * but the chain already points to the next INVALID one
3514 */
3515 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3516
3517 spin_unlock_irqrestore(&cq->cq_lock, flags);
3518 return done;
3519}
993d1b52
RA
3520
3521int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3522 u8 port_num,
3523 const struct ib_wc *in_wc,
3524 const struct ib_grh *in_grh,
3525 const struct ib_mad_hdr *mad_hdr,
3526 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3527 size_t *out_mad_size, u16 *out_mad_pkey_index)
3528{
3529 struct qedr_dev *dev = get_qedr_dev(ibdev);
3530
3531 DP_DEBUG(dev, QEDR_MSG_GSI,
3532 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3533 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3534 mad_hdr->class_specific, mad_hdr->class_version,
3535 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3536 return IB_MAD_RESULT_SUCCESS;
3537}
3538
3539int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3540 struct ib_port_immutable *immutable)
3541{
3542 struct ib_port_attr attr;
3543 int err;
3544
3545 err = qedr_query_port(ibdev, port_num, &attr);
3546 if (err)
3547 return err;
3548
3549 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3550 immutable->gid_tbl_len = attr.gid_tbl_len;
3551 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3552 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3553 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3554
3555 return 0;
3556}