1/*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *
20 * Contact Information:
21 * linux-drivers@emulex.com
22 *
23 * Emulex
24 * 3333 Susan Street
25 * Costa Mesa, CA 92626
26 *******************************************************************/
27
28#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h>
30#include <rdma/ib_user_verbs.h>
31#include <rdma/iw_cm.h>
32#include <rdma/ib_umem.h>
33#include <rdma/ib_addr.h>
34
35#include "ocrdma.h"
36#include "ocrdma_hw.h"
37#include "ocrdma_verbs.h"
38#include "ocrdma_abi.h"
39
40int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
41{
42 if (index > 1)
43 return -EINVAL;
44
45 *pkey = 0xffff;
46 return 0;
47}
48
49int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
50 int index, union ib_gid *sgid)
51{
52 struct ocrdma_dev *dev;
53
54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid));
 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL;
58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
60
61 return 0;
62}
63
64int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
65{
66 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
67
68 memset(attr, 0, sizeof *attr);
69 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
70 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
71 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
72 attr->max_mr_size = ~0ull;
73 attr->page_size_cap = 0xffff000;
74 attr->vendor_id = dev->nic_info.pdev->vendor;
75 attr->vendor_part_id = dev->nic_info.pdev->device;
76 attr->hw_ver = 0;
77 attr->max_qp = dev->attr.max_qp;
 78 attr->max_ah = OCRDMA_MAX_AH;
79 attr->max_qp_wr = dev->attr.max_wqe;
80
81 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
82 IB_DEVICE_RC_RNR_NAK_GEN |
83 IB_DEVICE_SHUTDOWN_PORT |
84 IB_DEVICE_SYS_IMAGE_GUID |
85 IB_DEVICE_LOCAL_DMA_LKEY |
86 IB_DEVICE_MEM_MGT_EXTENSIONS;
 87 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
 88 attr->max_sge_rd = dev->attr.max_rdma_sge;
89 attr->max_cq = dev->attr.max_cq;
90 attr->max_cqe = dev->attr.max_cqe;
91 attr->max_mr = dev->attr.max_mr;
92 attr->max_mw = 0;
93 attr->max_pd = dev->attr.max_pd;
94 attr->atomic_cap = 0;
95 attr->max_fmr = 0;
96 attr->max_map_per_fmr = 0;
97 attr->max_qp_rd_atom =
98 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
99 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
 100 attr->max_srq = dev->attr.max_srq;
 101 attr->max_srq_sge = dev->attr.max_srq_sge;
102 attr->max_srq_wr = dev->attr.max_rqe;
103 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
104 attr->max_fast_reg_page_list_len = 0;
105 attr->max_pkeys = 1;
106 return 0;
107}
108
109int ocrdma_query_port(struct ib_device *ibdev,
110 u8 port, struct ib_port_attr *props)
111{
112 enum ib_port_state port_state;
113 struct ocrdma_dev *dev;
114 struct net_device *netdev;
115
116 dev = get_ocrdma_dev(ibdev);
117 if (port > 1) {
118 pr_err("%s(%d) invalid_port=0x%x\n", __func__,
119 dev->id, port);
120 return -EINVAL;
121 }
122 netdev = dev->nic_info.netdev;
123 if (netif_running(netdev) && netif_oper_up(netdev)) {
124 port_state = IB_PORT_ACTIVE;
125 props->phys_state = 5;
126 } else {
127 port_state = IB_PORT_DOWN;
128 props->phys_state = 3;
129 }
130 props->max_mtu = IB_MTU_4096;
131 props->active_mtu = iboe_get_mtu(netdev->mtu);
132 props->lid = 0;
133 props->lmc = 0;
134 props->sm_lid = 0;
135 props->sm_sl = 0;
136 props->state = port_state;
137 props->port_cap_flags =
138 IB_PORT_CM_SUP |
139 IB_PORT_REINIT_SUP |
140 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
141 props->gid_tbl_len = OCRDMA_MAX_SGID;
142 props->pkey_tbl_len = 1;
143 props->bad_pkey_cntr = 0;
144 props->qkey_viol_cntr = 0;
145 props->active_width = IB_WIDTH_1X;
146 props->active_speed = 4;
147 props->max_msg_sz = 0x80000000;
148 props->max_vl_num = 4;
149 return 0;
150}
151
152int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
153 struct ib_port_modify *props)
154{
155 struct ocrdma_dev *dev;
156
157 dev = get_ocrdma_dev(ibdev);
158 if (port > 1) {
 159 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
160 return -EINVAL;
161 }
162 return 0;
163}
164
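/* Track a phys-address range that has been exported to user space so that a
 * later mmap() request against it can be validated.
 */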
165static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
166 unsigned long len)
167{
168 struct ocrdma_mm *mm;
169
170 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
171 if (mm == NULL)
172 return -ENOMEM;
173 mm->key.phy_addr = phy_addr;
174 mm->key.len = len;
175 INIT_LIST_HEAD(&mm->entry);
176
177 mutex_lock(&uctx->mm_list_lock);
178 list_add_tail(&mm->entry, &uctx->mm_head);
179 mutex_unlock(&uctx->mm_list_lock);
180 return 0;
181}
182
183static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
184 unsigned long len)
185{
186 struct ocrdma_mm *mm, *tmp;
187
188 mutex_lock(&uctx->mm_list_lock);
189 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
 190 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
191 continue;
192
193 list_del(&mm->entry);
194 kfree(mm);
195 break;
196 }
197 mutex_unlock(&uctx->mm_list_lock);
198}
199
200static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
201 unsigned long len)
202{
203 bool found = false;
204 struct ocrdma_mm *mm;
205
206 mutex_lock(&uctx->mm_list_lock);
207 list_for_each_entry(mm, &uctx->mm_head, entry) {
 208 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
209 continue;
210
211 found = true;
212 break;
213 }
214 mutex_unlock(&uctx->mm_list_lock);
215 return found;
216}
217
218struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
219 struct ib_udata *udata)
220{
221 int status;
222 struct ocrdma_ucontext *ctx;
223 struct ocrdma_alloc_ucontext_resp resp;
224 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
225 struct pci_dev *pdev = dev->nic_info.pdev;
226 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
227
228 if (!udata)
229 return ERR_PTR(-EFAULT);
230 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
231 if (!ctx)
232 return ERR_PTR(-ENOMEM);
233 INIT_LIST_HEAD(&ctx->mm_head);
234 mutex_init(&ctx->mm_list_lock);
235
236 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
237 &ctx->ah_tbl.pa, GFP_KERNEL);
238 if (!ctx->ah_tbl.va) {
239 kfree(ctx);
240 return ERR_PTR(-ENOMEM);
241 }
242 memset(ctx->ah_tbl.va, 0, map_len);
243 ctx->ah_tbl.len = map_len;
244
 245 memset(&resp, 0, sizeof(resp));
246 resp.ah_tbl_len = ctx->ah_tbl.len;
247 resp.ah_tbl_page = ctx->ah_tbl.pa;
248
249 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
250 if (status)
251 goto map_err;
252 resp.dev_id = dev->id;
253 resp.max_inline_data = dev->attr.max_inline_data;
254 resp.wqe_size = dev->attr.wqe_size;
255 resp.rqe_size = dev->attr.rqe_size;
256 resp.dpp_wqe_size = dev->attr.wqe_size;
257
258 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
259 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
260 if (status)
261 goto cpy_err;
262 return &ctx->ibucontext;
263
264cpy_err:
265 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
266map_err:
267 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
268 ctx->ah_tbl.pa);
269 kfree(ctx);
270 return ERR_PTR(status);
271}
272
273int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
274{
275 struct ocrdma_mm *mm, *tmp;
276 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
277 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
278 struct pci_dev *pdev = dev->nic_info.pdev;
279
280 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
281 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
282 uctx->ah_tbl.pa);
283
284 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
285 list_del(&mm->entry);
286 kfree(mm);
287 }
288 kfree(uctx);
289 return 0;
290}
291
292int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
293{
294 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
 295 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
296 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
297 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
298 unsigned long len = (vma->vm_end - vma->vm_start);
299 int status = 0;
300 bool found;
301
302 if (vma->vm_start & (PAGE_SIZE - 1))
303 return -EINVAL;
304 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
305 if (!found)
306 return -EINVAL;
307
308 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
309 dev->nic_info.db_total_size)) &&
310 (len <= dev->nic_info.db_page_size)) {
311 if (vma->vm_flags & VM_READ)
312 return -EPERM;
313
314 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
315 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
316 len, vma->vm_page_prot);
317 } else if (dev->nic_info.dpp_unmapped_len &&
318 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
319 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
320 dev->nic_info.dpp_unmapped_len)) &&
321 (len <= dev->nic_info.dpp_unmapped_len)) {
322 if (vma->vm_flags & VM_READ)
323 return -EPERM;
324
325 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
326 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
327 len, vma->vm_page_prot);
328 } else {
329 status = remap_pfn_range(vma, vma->vm_start,
330 vma->vm_pgoff, len, vma->vm_page_prot);
331 }
332 return status;
333}
334
 335static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
336 struct ib_ucontext *ib_ctx,
337 struct ib_udata *udata)
338{
339 int status;
340 u64 db_page_addr;
 341 u64 dpp_page_addr = 0;
342 u32 db_page_size;
343 struct ocrdma_alloc_pd_uresp rsp;
344 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
345
 346 memset(&rsp, 0, sizeof(rsp));
347 rsp.id = pd->id;
348 rsp.dpp_enabled = pd->dpp_enabled;
349 db_page_addr = dev->nic_info.unmapped_db +
350 (pd->id * dev->nic_info.db_page_size);
351 db_page_size = dev->nic_info.db_page_size;
352
353 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
354 if (status)
355 return status;
356
357 if (pd->dpp_enabled) {
 358 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
 359 (pd->id * PAGE_SIZE);
 360 status = ocrdma_add_mmap(uctx, dpp_page_addr,
 361 PAGE_SIZE);
362 if (status)
363 goto dpp_map_err;
364 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
365 rsp.dpp_page_addr_lo = dpp_page_addr;
366 }
367
368 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
369 if (status)
370 goto ucopy_err;
371
372 pd->uctx = uctx;
373 return 0;
374
375ucopy_err:
 376 if (pd->dpp_enabled)
 377 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
378dpp_map_err:
379 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
380 return status;
381}
382
383struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
384 struct ib_ucontext *context,
385 struct ib_udata *udata)
386{
387 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
388 struct ocrdma_pd *pd;
389 int status;
390
391 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
392 if (!pd)
393 return ERR_PTR(-ENOMEM);
 394 if (udata && context) {
395 pd->dpp_enabled =
396 (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
397 pd->num_dpp_qp =
398 pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
399 }
 400retry:
401 status = ocrdma_mbx_alloc_pd(dev, pd);
402 if (status) {
 403 /* try for pd without dpp */
404 if (pd->dpp_enabled) {
405 pd->dpp_enabled = false;
406 pd->num_dpp_qp = 0;
407 goto retry;
408 } else {
409 kfree(pd);
410 return ERR_PTR(status);
411 }
 412 }
413
414 if (udata && context) {
 415 status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
416 if (status)
417 goto err;
418 }
419 return &pd->ibpd;
420
421err:
422 status = ocrdma_mbx_dealloc_pd(dev, pd);
423 kfree(pd);
424 return ERR_PTR(status);
425}
426
427int ocrdma_dealloc_pd(struct ib_pd *ibpd)
428{
429 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 430 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
431 int status;
432 u64 usr_db;
433
434 status = ocrdma_mbx_dealloc_pd(dev, pd);
435 if (pd->uctx) {
436 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
 437 (pd->id * PAGE_SIZE);
 438 if (pd->dpp_enabled)
 439 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
440 usr_db = dev->nic_info.unmapped_db +
441 (pd->id * dev->nic_info.db_page_size);
442 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
443 }
444 kfree(pd);
445 return status;
446}
447
448static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
449 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
450{
451 int status;
 452
453 mr->hwmr.fr_mr = 0;
454 mr->hwmr.local_rd = 1;
455 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
456 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
457 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
458 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
459 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
460 mr->hwmr.num_pbls = num_pbls;
461
462 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
463 if (status)
464 return status;
465
466 mr->ibmr.lkey = mr->hwmr.lkey;
467 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
468 mr->ibmr.rkey = mr->hwmr.lkey;
 469 return 0;
470}
471
472struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
473{
 474 int status;
 475 struct ocrdma_mr *mr;
476 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
477 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
478
479 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
480 pr_err("%s err, invalid access rights\n", __func__);
481 return ERR_PTR(-EINVAL);
482 }
 483
484 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
485 if (!mr)
486 return ERR_PTR(-ENOMEM);
487
 488 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
489 OCRDMA_ADDR_CHECK_DISABLE);
490 if (status) {
491 kfree(mr);
492 return ERR_PTR(status);
493 }
494
495 return &mr->ibmr;
496}
497
498static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
499 struct ocrdma_hw_mr *mr)
500{
501 struct pci_dev *pdev = dev->nic_info.pdev;
502 int i = 0;
503
504 if (mr->pbl_table) {
505 for (i = 0; i < mr->num_pbls; i++) {
506 if (!mr->pbl_table[i].va)
507 continue;
508 dma_free_coherent(&pdev->dev, mr->pbl_size,
509 mr->pbl_table[i].va,
510 mr->pbl_table[i].pa);
511 }
512 kfree(mr->pbl_table);
513 mr->pbl_table = NULL;
514 }
515}
516
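/* Pick the smallest PBL size (a power-of-two multiple of OCRDMA_MIN_HPAGE_SIZE)
 * for which the number of PBLs needed to hold num_pbes page entries stays
 * below the device limit.
 */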
517static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
518 u32 num_pbes)
519{
520 u32 num_pbls = 0;
521 u32 idx = 0;
522 int status = 0;
523 u32 pbl_size;
524
525 do {
526 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
527 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
528 status = -EFAULT;
529 break;
530 }
531 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
532 num_pbls = num_pbls / (pbl_size / sizeof(u64));
533 idx++;
 534 } while (num_pbls >= dev->attr.max_num_mr_pbl);
535
536 mr->hwmr.num_pbes = num_pbes;
537 mr->hwmr.num_pbls = num_pbls;
538 mr->hwmr.pbl_size = pbl_size;
539 return status;
540}
541
542static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
543{
544 int status = 0;
545 int i;
546 u32 dma_len = mr->pbl_size;
547 struct pci_dev *pdev = dev->nic_info.pdev;
548 void *va;
549 dma_addr_t pa;
550
551 mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
552 mr->num_pbls, GFP_KERNEL);
553
554 if (!mr->pbl_table)
555 return -ENOMEM;
556
557 for (i = 0; i < mr->num_pbls; i++) {
558 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
559 if (!va) {
560 ocrdma_free_mr_pbl_tbl(dev, mr);
561 status = -ENOMEM;
562 break;
563 }
564 memset(va, 0, dma_len);
565 mr->pbl_table[i].va = va;
566 mr->pbl_table[i].pa = pa;
567 }
568 return status;
569}
570
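/* Walk the umem chunk list and write one little-endian PBE per hardware page
 * into the PBL table, moving to the next PBL whenever the current one fills.
 */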
571static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
572 u32 num_pbes)
573{
574 struct ocrdma_pbe *pbe;
575 struct ib_umem_chunk *chunk;
576 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
577 struct ib_umem *umem = mr->umem;
578 int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
579
580 if (!mr->hwmr.num_pbes)
581 return;
582
583 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
584 pbe_cnt = 0;
585
586 shift = ilog2(umem->page_size);
587
588 list_for_each_entry(chunk, &umem->chunk_list, list) {
589 /* get all the dma regions from the chunk. */
590 for (i = 0; i < chunk->nmap; i++) {
591 pages = sg_dma_len(&chunk->page_list[i]) >> shift;
592 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
593 /* store the page address in pbe */
594 pbe->pa_lo =
595 cpu_to_le32(sg_dma_address
596 (&chunk->page_list[i]) +
597 (umem->page_size * pg_cnt));
598 pbe->pa_hi =
599 cpu_to_le32(upper_32_bits
600 ((sg_dma_address
601 (&chunk->page_list[i]) +
602 umem->page_size * pg_cnt)));
603 pbe_cnt += 1;
604 total_num_pbes += 1;
605 pbe++;
606
607 /* if done building pbes, issue the mbx cmd. */
608 if (total_num_pbes == num_pbes)
609 return;
610
611 /* if the given pbl is full storing the pbes,
612 * move to next pbl.
613 */
614 if (pbe_cnt ==
615 (mr->hwmr.pbl_size / sizeof(u64))) {
616 pbl_tbl++;
617 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
618 pbe_cnt = 0;
619 }
620 }
621 }
622 }
623}
624
625struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
626 u64 usr_addr, int acc, struct ib_udata *udata)
627{
628 int status = -ENOMEM;
 629 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
630 struct ocrdma_mr *mr;
631 struct ocrdma_pd *pd;
632 u32 num_pbes;
633
634 pd = get_ocrdma_pd(ibpd);
635
636 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
637 return ERR_PTR(-EINVAL);
638
639 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
640 if (!mr)
641 return ERR_PTR(status);
642 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
643 if (IS_ERR(mr->umem)) {
644 status = -EFAULT;
645 goto umem_err;
646 }
647 num_pbes = ib_umem_page_count(mr->umem);
 648 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
649 if (status)
650 goto umem_err;
651
652 mr->hwmr.pbe_size = mr->umem->page_size;
653 mr->hwmr.fbo = mr->umem->offset;
654 mr->hwmr.va = usr_addr;
655 mr->hwmr.len = len;
656 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
657 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
658 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
659 mr->hwmr.local_rd = 1;
660 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
661 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
662 if (status)
663 goto umem_err;
664 build_user_pbes(dev, mr, num_pbes);
665 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
666 if (status)
667 goto mbx_err;
668 mr->ibmr.lkey = mr->hwmr.lkey;
669 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
670 mr->ibmr.rkey = mr->hwmr.lkey;
671
672 return &mr->ibmr;
673
674mbx_err:
675 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
676umem_err:
677 kfree(mr);
678 return ERR_PTR(status);
679}
680
681int ocrdma_dereg_mr(struct ib_mr *ib_mr)
682{
683 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 684 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
685 int status;
686
687 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
688
689 if (mr->hwmr.fr_mr == 0)
690 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
691
692 /* it could be user registered memory. */
693 if (mr->umem)
694 ib_umem_release(mr->umem);
695 kfree(mr);
696 return status;
697}
698
699static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
700 struct ib_udata *udata,
701 struct ib_ucontext *ib_ctx)
702{
703 int status;
704 struct ocrdma_ucontext *uctx;
705 struct ocrdma_create_cq_uresp uresp;
706
 707 memset(&uresp, 0, sizeof(uresp));
 708 uresp.cq_id = cq->id;
 709 uresp.page_size = PAGE_ALIGN(cq->len);
710 uresp.num_pages = 1;
711 uresp.max_hw_cqe = cq->max_hw_cqe;
712 uresp.page_addr[0] = cq->pa;
713 uresp.db_page_addr = dev->nic_info.unmapped_db;
714 uresp.db_page_size = dev->nic_info.db_page_size;
715 uresp.phase_change = cq->phase_change ? 1 : 0;
716 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
717 if (status) {
 718 pr_err("%s(%d) copy error cqid=0x%x.\n",
 719 __func__, dev->id, cq->id);
720 goto err;
721 }
722 uctx = get_ocrdma_ucontext(ib_ctx);
723 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
724 if (status)
725 goto err;
726 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
727 if (status) {
728 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
729 goto err;
730 }
731 cq->ucontext = uctx;
732err:
733 return status;
734}
735
736struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
737 struct ib_ucontext *ib_ctx,
738 struct ib_udata *udata)
739{
740 struct ocrdma_cq *cq;
741 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
742 int status;
743 struct ocrdma_create_cq_ureq ureq;
744
745 if (udata) {
746 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
747 return ERR_PTR(-EFAULT);
748 } else
749 ureq.dpp_cq = 0;
750 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
751 if (!cq)
752 return ERR_PTR(-ENOMEM);
753
754 spin_lock_init(&cq->cq_lock);
755 spin_lock_init(&cq->comp_handler_lock);
756 INIT_LIST_HEAD(&cq->sq_head);
757 INIT_LIST_HEAD(&cq->rq_head);
758
759 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
760 if (status) {
761 kfree(cq);
762 return ERR_PTR(status);
763 }
764 if (ib_ctx) {
 765 status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
766 if (status)
767 goto ctx_err;
768 }
769 cq->phase = OCRDMA_CQE_VALID;
770 cq->arm_needed = true;
771 dev->cq_tbl[cq->id] = cq;
772
773 return &cq->ibcq;
774
775ctx_err:
776 ocrdma_mbx_destroy_cq(dev, cq);
777 kfree(cq);
778 return ERR_PTR(status);
779}
780
781int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
782 struct ib_udata *udata)
783{
784 int status = 0;
785 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
786
787 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
788 status = -EINVAL;
789 return status;
790 }
791 ibcq->cqe = new_cnt;
792 return status;
793}
794
795int ocrdma_destroy_cq(struct ib_cq *ibcq)
796{
797 int status;
798 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 799 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
 800
801 status = ocrdma_mbx_destroy_cq(dev, cq);
802
803 if (cq->ucontext) {
804 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
805 PAGE_ALIGN(cq->len));
806 ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
807 dev->nic_info.db_page_size);
808 }
809 dev->cq_tbl[cq->id] = NULL;
810
811 kfree(cq);
812 return status;
813}
814
815static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
816{
817 int status = -EINVAL;
818
819 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
820 dev->qp_tbl[qp->id] = qp;
821 status = 0;
822 }
823 return status;
824}
825
826static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
827{
828 dev->qp_tbl[qp->id] = NULL;
829}
830
831static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
832 struct ib_qp_init_attr *attrs)
833{
834 if ((attrs->qp_type != IB_QPT_GSI) &&
835 (attrs->qp_type != IB_QPT_RC) &&
836 (attrs->qp_type != IB_QPT_UC) &&
837 (attrs->qp_type != IB_QPT_UD)) {
838 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
839 __func__, dev->id, attrs->qp_type);
840 return -EINVAL;
841 }
842 /* Skip the check for QP1 to support CM size of 128 */
843 if ((attrs->qp_type != IB_QPT_GSI) &&
844 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
845 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
846 __func__, dev->id, attrs->cap.max_send_wr);
847 pr_err("%s(%d) supported send_wr=0x%x\n",
848 __func__, dev->id, dev->attr.max_wqe);
849 return -EINVAL;
850 }
851 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
852 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
853 __func__, dev->id, attrs->cap.max_recv_wr);
854 pr_err("%s(%d) supported recv_wr=0x%x\n",
855 __func__, dev->id, dev->attr.max_rqe);
856 return -EINVAL;
857 }
858 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
859 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
860 __func__, dev->id, attrs->cap.max_inline_data);
861 pr_err("%s(%d) supported inline data size=0x%x\n",
862 __func__, dev->id, dev->attr.max_inline_data);
863 return -EINVAL;
864 }
865 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
866 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
867 __func__, dev->id, attrs->cap.max_send_sge);
868 pr_err("%s(%d) supported send_sge=0x%x\n",
869 __func__, dev->id, dev->attr.max_send_sge);
870 return -EINVAL;
871 }
872 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
873 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
874 __func__, dev->id, attrs->cap.max_recv_sge);
875 pr_err("%s(%d) supported recv_sge=0x%x\n",
876 __func__, dev->id, dev->attr.max_recv_sge);
877 return -EINVAL;
878 }
879 /* unprivileged user space cannot create special QP */
880 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
 881 pr_err
882 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
883 __func__, dev->id, attrs->qp_type);
884 return -EINVAL;
885 }
886 /* allow creating only one GSI type of QP */
887 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
888 pr_err("%s(%d) GSI special QPs already created.\n",
889 __func__, dev->id);
890 return -EINVAL;
891 }
892 /* verify consumer QPs are not trying to use GSI QP's CQ */
893 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
894 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
 895 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
 896 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
 897 __func__, dev->id);
898 return -EINVAL;
899 }
900 }
901 return 0;
902}
903
904static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
905 struct ib_udata *udata, int dpp_offset,
906 int dpp_credit_lmt, int srq)
907{
908 int status = 0;
909 u64 usr_db;
910 struct ocrdma_create_qp_uresp uresp;
911 struct ocrdma_dev *dev = qp->dev;
912 struct ocrdma_pd *pd = qp->pd;
913
914 memset(&uresp, 0, sizeof(uresp));
915 usr_db = dev->nic_info.unmapped_db +
916 (pd->id * dev->nic_info.db_page_size);
917 uresp.qp_id = qp->id;
918 uresp.sq_dbid = qp->sq.dbid;
919 uresp.num_sq_pages = 1;
 920 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
921 uresp.sq_page_addr[0] = qp->sq.pa;
922 uresp.num_wqe_allocated = qp->sq.max_cnt;
923 if (!srq) {
924 uresp.rq_dbid = qp->rq.dbid;
925 uresp.num_rq_pages = 1;
 926 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
927 uresp.rq_page_addr[0] = qp->rq.pa;
928 uresp.num_rqe_allocated = qp->rq.max_cnt;
929 }
930 uresp.db_page_addr = usr_db;
931 uresp.db_page_size = dev->nic_info.db_page_size;
932 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
933 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
934 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
935 uresp.db_shift = 24;
936 } else {
937 uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
938 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
939 uresp.db_shift = 16;
940 }
941
942 if (qp->dpp_enabled) {
943 uresp.dpp_credit = dpp_credit_lmt;
944 uresp.dpp_offset = dpp_offset;
945 }
946 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
947 if (status) {
 948 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
949 goto err;
950 }
951 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
952 uresp.sq_page_size);
953 if (status)
954 goto err;
955
956 if (!srq) {
957 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
958 uresp.rq_page_size);
959 if (status)
960 goto rq_map_err;
961 }
962 return status;
963rq_map_err:
964 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
965err:
966 return status;
967}
968
969static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
970 struct ocrdma_pd *pd)
971{
972 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
973 qp->sq_db = dev->nic_info.db +
974 (pd->id * dev->nic_info.db_page_size) +
975 OCRDMA_DB_GEN2_SQ_OFFSET;
976 qp->rq_db = dev->nic_info.db +
977 (pd->id * dev->nic_info.db_page_size) +
 978 OCRDMA_DB_GEN2_RQ_OFFSET;
979 } else {
980 qp->sq_db = dev->nic_info.db +
981 (pd->id * dev->nic_info.db_page_size) +
982 OCRDMA_DB_SQ_OFFSET;
983 qp->rq_db = dev->nic_info.db +
984 (pd->id * dev->nic_info.db_page_size) +
985 OCRDMA_DB_RQ_OFFSET;
986 }
987}
988
989static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
990{
991 qp->wqe_wr_id_tbl =
992 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
993 GFP_KERNEL);
994 if (qp->wqe_wr_id_tbl == NULL)
995 return -ENOMEM;
996 qp->rqe_wr_id_tbl =
997 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
998 if (qp->rqe_wr_id_tbl == NULL)
999 return -ENOMEM;
1000
1001 return 0;
1002}
1003
1004static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1005 struct ocrdma_pd *pd,
1006 struct ib_qp_init_attr *attrs)
1007{
1008 qp->pd = pd;
1009 spin_lock_init(&qp->q_lock);
1010 INIT_LIST_HEAD(&qp->sq_entry);
1011 INIT_LIST_HEAD(&qp->rq_entry);
1012
1013 qp->qp_type = attrs->qp_type;
1014 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1015 qp->max_inline_data = attrs->cap.max_inline_data;
1016 qp->sq.max_sges = attrs->cap.max_send_sge;
1017 qp->rq.max_sges = attrs->cap.max_recv_sge;
1018 qp->state = OCRDMA_QPS_RST;
 1019 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1020}
1021
1022
1023static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1024 struct ib_qp_init_attr *attrs)
1025{
1026 if (attrs->qp_type == IB_QPT_GSI) {
1027 dev->gsi_qp_created = 1;
1028 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1029 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1030 }
1031}
1032
1033struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1034 struct ib_qp_init_attr *attrs,
1035 struct ib_udata *udata)
1036{
1037 int status;
1038 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1039 struct ocrdma_qp *qp;
 1040 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1041 struct ocrdma_create_qp_ureq ureq;
1042 u16 dpp_credit_lmt, dpp_offset;
1043
1044 status = ocrdma_check_qp_params(ibpd, dev, attrs);
1045 if (status)
1046 goto gen_err;
1047
1048 memset(&ureq, 0, sizeof(ureq));
1049 if (udata) {
1050 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1051 return ERR_PTR(-EFAULT);
1052 }
1053 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1054 if (!qp) {
1055 status = -ENOMEM;
1056 goto gen_err;
1057 }
1058 qp->dev = dev;
1059 ocrdma_set_qp_init_params(qp, pd, attrs);
1060 if (udata == NULL)
1061 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1062 OCRDMA_QP_FAST_REG);
1063
1064 mutex_lock(&dev->dev_lock);
1065 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1066 ureq.dpp_cq_id,
1067 &dpp_offset, &dpp_credit_lmt);
1068 if (status)
1069 goto mbx_err;
1070
 1071 /* user space QP's wr_id tables are managed in the library */
1072 if (udata == NULL) {
1073 status = ocrdma_alloc_wr_id_tbl(qp);
1074 if (status)
1075 goto map_err;
1076 }
1077
1078 status = ocrdma_add_qpn_map(dev, qp);
1079 if (status)
1080 goto map_err;
1081 ocrdma_set_qp_db(dev, qp, pd);
1082 if (udata) {
1083 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1084 dpp_credit_lmt,
1085 (attrs->srq != NULL));
1086 if (status)
1087 goto cpy_err;
1088 }
1089 ocrdma_store_gsi_qp_cq(dev, attrs);
 1090 qp->ibqp.qp_num = qp->id;
1091 mutex_unlock(&dev->dev_lock);
1092 return &qp->ibqp;
1093
1094cpy_err:
1095 ocrdma_del_qpn_map(dev, qp);
1096map_err:
1097 ocrdma_mbx_destroy_qp(dev, qp);
1098mbx_err:
1099 mutex_unlock(&dev->dev_lock);
1100 kfree(qp->wqe_wr_id_tbl);
1101 kfree(qp->rqe_wr_id_tbl);
1102 kfree(qp);
 1103 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1104gen_err:
1105 return ERR_PTR(status);
1106}
1107
1108
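/* Ring the RQ doorbell for receive work requests whose doorbell writes were
 * cached (deferred) earlier.
 */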
1109static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
1110{
1111 if (qp->db_cache) {
1112 u32 val = qp->rq.dbid | (qp->db_cache <<
1113 ocrdma_get_num_posted_shift(qp));
1114 iowrite32(val, qp->rq_db);
1115 qp->db_cache = 0;
1116 }
1117}
1118
1119int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1120 int attr_mask)
1121{
1122 int status = 0;
1123 struct ocrdma_qp *qp;
1124 struct ocrdma_dev *dev;
1125 enum ib_qp_state old_qps;
1126
1127 qp = get_ocrdma_qp(ibqp);
1128 dev = qp->dev;
1129 if (attr_mask & IB_QP_STATE)
 1130 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1131 /* if new and previous states are same hw doesn't need to
1132 * know about it.
1133 */
1134 if (status < 0)
1135 return status;
1136 status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
1137 if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
1138 ocrdma_flush_rq_db(qp);
1139
1140 return status;
1141}
1142
1143int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1144 int attr_mask, struct ib_udata *udata)
1145{
1146 unsigned long flags;
1147 int status = -EINVAL;
1148 struct ocrdma_qp *qp;
1149 struct ocrdma_dev *dev;
1150 enum ib_qp_state old_qps, new_qps;
1151
1152 qp = get_ocrdma_qp(ibqp);
1153 dev = qp->dev;
1154
 1155 /* synchronize with multiple contexts trying to change or retrieve qps */
1156 mutex_lock(&dev->dev_lock);
 1157 /* synchronize with wqe, rqe posting and cqe processing contexts */
1158 spin_lock_irqsave(&qp->q_lock, flags);
1159 old_qps = get_ibqp_state(qp->state);
1160 if (attr_mask & IB_QP_STATE)
1161 new_qps = attr->qp_state;
1162 else
1163 new_qps = old_qps;
1164 spin_unlock_irqrestore(&qp->q_lock, flags);
1165
1166 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1167 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1168 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1169 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1170 old_qps, new_qps);
1171 goto param_err;
1172 }
1173
1174 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1175 if (status > 0)
1176 status = 0;
1177param_err:
1178 mutex_unlock(&dev->dev_lock);
1179 return status;
1180}
1181
1182static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1183{
1184 switch (mtu) {
1185 case 256:
1186 return IB_MTU_256;
1187 case 512:
1188 return IB_MTU_512;
1189 case 1024:
1190 return IB_MTU_1024;
1191 case 2048:
1192 return IB_MTU_2048;
1193 case 4096:
1194 return IB_MTU_4096;
1195 default:
1196 return IB_MTU_1024;
1197 }
1198}
1199
1200static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1201{
1202 int ib_qp_acc_flags = 0;
1203
1204 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1205 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1206 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1207 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1208 return ib_qp_acc_flags;
1209}
1210
1211int ocrdma_query_qp(struct ib_qp *ibqp,
1212 struct ib_qp_attr *qp_attr,
1213 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1214{
1215 int status;
1216 u32 qp_state;
1217 struct ocrdma_qp_params params;
1218 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1219 struct ocrdma_dev *dev = qp->dev;
1220
1221 memset(&params, 0, sizeof(params));
1222 mutex_lock(&dev->dev_lock);
1223 status = ocrdma_mbx_query_qp(dev, qp, &params);
1224 mutex_unlock(&dev->dev_lock);
1225 if (status)
1226 goto mbx_err;
1227 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1228 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1229 qp_attr->path_mtu =
1230 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1231 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1232 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1233 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1234 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1235 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1236 qp_attr->dest_qp_num =
1237 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1238
1239 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1240 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1241 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1242 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1243 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1244 qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
1245 qp_init_attr->cap = qp_attr->cap;
1246 memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1247 sizeof(params.dgid));
1248 qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1249 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1250 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1251 qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1252 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1253 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1254 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1255 OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
1256 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1257
1258 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1259 qp_attr->ah_attr.port_num = 1;
1260 qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1261 OCRDMA_QP_PARAMS_SL_MASK) >>
1262 OCRDMA_QP_PARAMS_SL_SHIFT;
1263 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1264 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1265 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1266 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1267 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1268 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1269 qp_attr->retry_cnt =
1270 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1271 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1272 qp_attr->min_rnr_timer = 0;
1273 qp_attr->pkey_index = 0;
1274 qp_attr->port_num = 1;
1275 qp_attr->ah_attr.src_path_bits = 0;
1276 qp_attr->ah_attr.static_rate = 0;
1277 qp_attr->alt_pkey_index = 0;
1278 qp_attr->alt_port_num = 0;
1279 qp_attr->alt_timeout = 0;
1280 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1281 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1282 OCRDMA_QP_PARAMS_STATE_SHIFT;
1283 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1284 qp_attr->max_dest_rd_atomic =
1285 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1286 qp_attr->max_rd_atomic =
1287 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1288 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1289 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1290mbx_err:
1291 return status;
1292}
1293
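/* Flip the bit that tracks whether the given SRQ RQE index is in use. */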
1294static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1295{
1296 int i = idx / 32;
1297 unsigned int mask = (1 << (idx % 32));
1298
1299 if (srq->idx_bit_fields[i] & mask)
1300 srq->idx_bit_fields[i] &= ~mask;
1301 else
1302 srq->idx_bit_fields[i] |= mask;
1303}
1304
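/* The SQ/RQ are circular buffers; the helpers below compute the free entry
 * count and advance the head/tail indices with wrap-around.
 */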
1305static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1306{
 1307 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1308}
1309
1310static int is_hw_sq_empty(struct ocrdma_qp *qp)
1311{
 1312 return (qp->sq.tail == qp->sq.head);
1313}
1314
1315static int is_hw_rq_empty(struct ocrdma_qp *qp)
1316{
 1317 return (qp->rq.tail == qp->rq.head);
1318}
1319
1320static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1321{
1322 return q->va + (q->head * q->entry_size);
1323}
1324
1325static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1326 u32 idx)
1327{
1328 return q->va + (idx * q->entry_size);
1329}
1330
1331static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1332{
1333 q->head = (q->head + 1) & q->max_wqe_idx;
1334}
1335
1336static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1337{
1338 q->tail = (q->tail + 1) & q->max_wqe_idx;
1339}
1340
1341/* discard the cqe for a given QP */
1342static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1343{
1344 unsigned long cq_flags;
1345 unsigned long flags;
1346 int discard_cnt = 0;
1347 u32 cur_getp, stop_getp;
1348 struct ocrdma_cqe *cqe;
1349 u32 qpn = 0;
1350
1351 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1352
1353 /* traverse through the CQEs in the hw CQ,
1354 * find the matching CQE for a given qp,
1355 * mark the matching one discarded by clearing qpn.
1356 * ring the doorbell in the poll_cq() as
1357 * we don't complete out of order cqe.
1358 */
1359
1360 cur_getp = cq->getp;
 1361 /* find up to where we reap the cq. */
1362 stop_getp = cur_getp;
1363 do {
1364 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1365 break;
1366
1367 cqe = cq->va + cur_getp;
1368 /* if (a) done reaping whole hw cq, or
1369 * (b) qp_xq becomes empty.
1370 * then exit
1371 */
1372 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1373 /* if previously discarded cqe found, skip that too. */
1374 /* check for matching qp */
1375 if (qpn == 0 || qpn != qp->id)
1376 goto skip_cqe;
1377
1378 /* mark cqe discarded so that it is not picked up later
1379 * in the poll_cq().
1380 */
1381 discard_cnt += 1;
1382 cqe->cmn.qpn = 0;
 1383 if (is_cqe_for_sq(cqe)) {
 1384 ocrdma_hwq_inc_tail(&qp->sq);
 1385 } else {
1386 if (qp->srq) {
1387 spin_lock_irqsave(&qp->srq->q_lock, flags);
1388 ocrdma_hwq_inc_tail(&qp->srq->rq);
1389 ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1390 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1391
 1392 } else {
 1393 ocrdma_hwq_inc_tail(&qp->rq);
 1394 }
1395 }
1396skip_cqe:
1397 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1398 } while (cur_getp != stop_getp);
1399 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1400}
1401
 1402 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1403{
1404 int found = false;
1405 unsigned long flags;
1406 struct ocrdma_dev *dev = qp->dev;
1407 /* sync with any active CQ poll */
1408
1409 spin_lock_irqsave(&dev->flush_q_lock, flags);
1410 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1411 if (found)
1412 list_del(&qp->sq_entry);
1413 if (!qp->srq) {
1414 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1415 if (found)
1416 list_del(&qp->rq_entry);
1417 }
1418 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1419}
1420
1421int ocrdma_destroy_qp(struct ib_qp *ibqp)
1422{
1423 int status;
1424 struct ocrdma_pd *pd;
1425 struct ocrdma_qp *qp;
1426 struct ocrdma_dev *dev;
1427 struct ib_qp_attr attrs;
1428 int attr_mask = IB_QP_STATE;
 1429 unsigned long flags;
1430
1431 qp = get_ocrdma_qp(ibqp);
1432 dev = qp->dev;
1433
1434 attrs.qp_state = IB_QPS_ERR;
1435 pd = qp->pd;
1436
1437 /* change the QP state to ERROR */
1438 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1439
 1440 /* ensure that CQEs for a newly created QP (whose id may be the same as
 1441 * one that is just being destroyed) don't get discarded until
 1442 * the old CQEs are discarded.
1443 */
1444 mutex_lock(&dev->dev_lock);
1445 status = ocrdma_mbx_destroy_qp(dev, qp);
1446
1447 /*
1448 * acquire CQ lock while destroy is in progress, in order to
 1449 * protect against processing in-flight CQEs for this QP.
1450 */
 1451 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
 1452 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
 1453 spin_lock(&qp->rq_cq->cq_lock);
1454
1455 ocrdma_del_qpn_map(dev, qp);
1456
1457 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1458 spin_unlock(&qp->rq_cq->cq_lock);
1459 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1460
1461 if (!pd->uctx) {
1462 ocrdma_discard_cqes(qp, qp->sq_cq);
1463 ocrdma_discard_cqes(qp, qp->rq_cq);
1464 }
1465 mutex_unlock(&dev->dev_lock);
1466
1467 if (pd->uctx) {
1468 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1469 PAGE_ALIGN(qp->sq.len));
 1470 if (!qp->srq)
1471 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1472 PAGE_ALIGN(qp->rq.len));
1473 }
1474
1475 ocrdma_del_flush_qp(qp);
1476
1477 kfree(qp->wqe_wr_id_tbl);
1478 kfree(qp->rqe_wr_id_tbl);
1479 kfree(qp);
1480 return status;
1481}
1482
1483static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1484 struct ib_udata *udata)
1485{
1486 int status;
1487 struct ocrdma_create_srq_uresp uresp;
1488
 1489 memset(&uresp, 0, sizeof(uresp));
1490 uresp.rq_dbid = srq->rq.dbid;
1491 uresp.num_rq_pages = 1;
1492 uresp.rq_page_addr[0] = srq->rq.pa;
1493 uresp.rq_page_size = srq->rq.len;
1494 uresp.db_page_addr = dev->nic_info.unmapped_db +
1495 (srq->pd->id * dev->nic_info.db_page_size);
1496 uresp.db_page_size = dev->nic_info.db_page_size;
 1497 uresp.num_rqe_allocated = srq->rq.max_cnt;
 1498 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
 1499 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1500 uresp.db_shift = 24;
1501 } else {
1502 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1503 uresp.db_shift = 16;
1504 }
1505
1506 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1507 if (status)
1508 return status;
1509 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1510 uresp.rq_page_size);
1511 if (status)
1512 return status;
1513 return status;
1514}
1515
1516struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1517 struct ib_srq_init_attr *init_attr,
1518 struct ib_udata *udata)
1519{
1520 int status = -ENOMEM;
1521 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 1522 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1523 struct ocrdma_srq *srq;
1524
1525 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1526 return ERR_PTR(-EINVAL);
1527 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1528 return ERR_PTR(-EINVAL);
1529
1530 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1531 if (!srq)
1532 return ERR_PTR(status);
1533
1534 spin_lock_init(&srq->q_lock);
1535 srq->pd = pd;
1536 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
 1537 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1538 if (status)
1539 goto err;
1540
1541 if (udata == NULL) {
1542 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1543 GFP_KERNEL);
1544 if (srq->rqe_wr_id_tbl == NULL)
1545 goto arm_err;
1546
1547 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1548 (srq->rq.max_cnt % 32 ? 1 : 0);
1549 srq->idx_bit_fields =
1550 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1551 if (srq->idx_bit_fields == NULL)
1552 goto arm_err;
1553 memset(srq->idx_bit_fields, 0xff,
1554 srq->bit_fields_len * sizeof(u32));
1555 }
1556
1557 if (init_attr->attr.srq_limit) {
1558 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1559 if (status)
1560 goto arm_err;
1561 }
1562
 1563 if (udata) {
 1564 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1565 if (status)
1566 goto arm_err;
1567 }
1568
1569 return &srq->ibsrq;
1570
1571arm_err:
1572 ocrdma_mbx_destroy_srq(dev, srq);
1573err:
1574 kfree(srq->rqe_wr_id_tbl);
1575 kfree(srq->idx_bit_fields);
1576 kfree(srq);
1577 return ERR_PTR(status);
1578}
1579
1580int ocrdma_modify_srq(struct ib_srq *ibsrq,
1581 struct ib_srq_attr *srq_attr,
1582 enum ib_srq_attr_mask srq_attr_mask,
1583 struct ib_udata *udata)
1584{
1585 int status = 0;
1586 struct ocrdma_srq *srq;
1587
1588 srq = get_ocrdma_srq(ibsrq);
fe2caefc
PP
1589 if (srq_attr_mask & IB_SRQ_MAX_WR)
1590 status = -EINVAL;
1591 else
1592 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1593 return status;
1594}
1595
1596int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1597{
1598 int status;
1599 struct ocrdma_srq *srq;
1600
1601 srq = get_ocrdma_srq(ibsrq);
1602 status = ocrdma_mbx_query_srq(srq, srq_attr);
1603 return status;
1604}
1605
1606int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1607{
1608 int status;
1609 struct ocrdma_srq *srq;
 1610 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1611
1612 srq = get_ocrdma_srq(ibsrq);
1613
1614 status = ocrdma_mbx_destroy_srq(dev, srq);
1615
1616 if (srq->pd->uctx)
1617 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1618 PAGE_ALIGN(srq->rq.len));
 1619
1620 kfree(srq->idx_bit_fields);
1621 kfree(srq->rqe_wr_id_tbl);
1622 kfree(srq);
1623 return status;
1624}
1625
1626/* unprivileged verbs and their support functions. */
1627static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1628 struct ocrdma_hdr_wqe *hdr,
1629 struct ib_send_wr *wr)
1630{
1631 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1632 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1633 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1634
1635 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1636 if (qp->qp_type == IB_QPT_GSI)
1637 ud_hdr->qkey = qp->qkey;
1638 else
1639 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1640 ud_hdr->rsvd_ahid = ah->id;
1641}
1642
1643static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1644 struct ocrdma_sge *sge, int num_sge,
1645 struct ib_sge *sg_list)
1646{
1647 int i;
1648
1649 for (i = 0; i < num_sge; i++) {
1650 sge[i].lrkey = sg_list[i].lkey;
1651 sge[i].addr_lo = sg_list[i].addr;
1652 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1653 sge[i].len = sg_list[i].length;
1654 hdr->total_len += sg_list[i].length;
1655 }
1656 if (num_sge == 0)
1657 memset(sge, 0, sizeof(*sge));
1658}
1659
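/* When IB_SEND_INLINE is requested (and the QP is not UD), copy the payload
 * straight into the WQE; otherwise build a normal SGE list. Either way the
 * WQE size is encoded into the control word in OCRDMA_WQE_STRIDE units.
 */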
1660static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1661 struct ocrdma_hdr_wqe *hdr,
1662 struct ocrdma_sge *sge,
1663 struct ib_send_wr *wr, u32 wqe_size)
1664{
 1665 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
 1666 if (wr->sg_list[0].length > qp->max_inline_data) {
1667 pr_err("%s() supported_len=0x%x,\n"
1668 " unspported len req=0x%x\n", __func__,
1669 qp->max_inline_data, wr->sg_list[0].length);
1670 return -EINVAL;
1671 }
1672 memcpy(sge,
1673 (void *)(unsigned long)wr->sg_list[0].addr,
1674 wr->sg_list[0].length);
1675 hdr->total_len = wr->sg_list[0].length;
1676 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1677 if (0 == wr->sg_list[0].length)
1678 wqe_size += sizeof(struct ocrdma_sge);
1679 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1680 } else {
1681 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1682 if (wr->num_sge)
1683 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1684 else
1685 wqe_size += sizeof(struct ocrdma_sge);
1686 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1687 }
1688 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1689 return 0;
1690}
1691
1692static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1693 struct ib_send_wr *wr)
1694{
1695 int status;
1696 struct ocrdma_sge *sge;
1697 u32 wqe_size = sizeof(*hdr);
1698
1699 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1700 ocrdma_build_ud_hdr(qp, hdr, wr);
1701 sge = (struct ocrdma_sge *)(hdr + 2);
1702 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
 1703 } else {
 1704 sge = (struct ocrdma_sge *)(hdr + 1);
 1705 }
1706
1707 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1708 return status;
1709}
1710
1711static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1712 struct ib_send_wr *wr)
1713{
1714 int status;
1715 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1716 struct ocrdma_sge *sge = ext_rw + 1;
1717 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1718
1719 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1720 if (status)
1721 return status;
1722 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1723 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1724 ext_rw->lrkey = wr->wr.rdma.rkey;
1725 ext_rw->len = hdr->total_len;
1726 return 0;
1727}
1728
1729static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1730 struct ib_send_wr *wr)
1731{
1732 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1733 struct ocrdma_sge *sge = ext_rw + 1;
1734 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1735 sizeof(struct ocrdma_hdr_wqe);
1736
1737 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1738 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1739 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1740 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1741
1742 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1743 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1744 ext_rw->lrkey = wr->wr.rdma.rkey;
1745 ext_rw->len = hdr->total_len;
1746}
1747
1748static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1749 struct ocrdma_hw_mr *hwmr)
1750{
1751 int i;
1752 u64 buf_addr = 0;
1753 int num_pbes;
1754 struct ocrdma_pbe *pbe;
1755
1756 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1757 num_pbes = 0;
1758
1759 /* go through the OS phy regions & fill hw pbe entries into pbls. */
1760 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
1761 /* number of pbes can be more for one OS buf, when
1762 * buffers are of different sizes.
1763 * split the ib_buf to one or more pbes.
1764 */
1765 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1766 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1767 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1768 num_pbes += 1;
1769 pbe++;
1770
1771 /* if the pbl is full storing the pbes,
1772 * move to next pbl.
1773 */
1774 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
1775 pbl_tbl++;
1776 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1777 }
1778 }
1779 return;
1780}
1781
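/* Encode a page size as a power-of-two exponent relative to 4K
 * (0 = 4K ... 16 = 256M).
 */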
1782static int get_encoded_page_size(int pg_sz)
1783{
1784 /* Max size is 256M 4096 << 16 */
1785 int i = 0;
1786 for (; i < 17; i++)
1787 if (pg_sz == (4096 << i))
1788 break;
1789 return i;
1790}
1791
1792
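/* Build a fast-register (FRMR) WQE: validate the page list against device
 * limits, set the access flags, and program the IOVA, first-byte offset and
 * page entries of the target MR.
 */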
1793static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1794 struct ib_send_wr *wr)
1795{
1796 u64 fbo;
1797 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1798 struct ocrdma_mr *mr;
1799 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1800
1801 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
1802
1803 if ((wr->wr.fast_reg.page_list_len >
1804 qp->dev->attr.max_pages_per_frmr) ||
1805 (wr->wr.fast_reg.length > 0xffffffffULL))
1806 return -EINVAL;
1807
1808 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
1809 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1810
1811 if (wr->wr.fast_reg.page_list_len == 0)
1812 BUG();
1813 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
1814 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
1815 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
1816 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
1817 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
1818 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
1819 hdr->lkey = wr->wr.fast_reg.rkey;
1820 hdr->total_len = wr->wr.fast_reg.length;
1821
1822 fbo = wr->wr.fast_reg.iova_start -
1823 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
1824
1825 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
1826 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
1827 fast_reg->fbo_hi = upper_32_bits(fbo);
1828 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
1829 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
1830 fast_reg->size_sge =
1831 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
1832 mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
1833 (OCRDMA_MAX_STAG - 1)];
1834 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
1835 return 0;
1836}
1837
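/* Ring the SQ doorbell: the low bits carry the SQ doorbell id and the
 * value at bit 16 is the count of newly posted WQEs (always one here).
 */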
1838static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1839{
1840 u32 val = qp->sq.dbid | (1 << 16);
1841
1842 iowrite32(val, qp->sq_db);
1843}
1844
1845int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1846 struct ib_send_wr **bad_wr)
1847{
1848 int status = 0;
1849 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1850 struct ocrdma_hdr_wqe *hdr;
1851 unsigned long flags;
1852
1853 spin_lock_irqsave(&qp->q_lock, flags);
1854 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1855 spin_unlock_irqrestore(&qp->q_lock, flags);
f6ddcf71 1856 *bad_wr = wr;
1857 return -EINVAL;
1858 }
1859
1860 while (wr) {
1861 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1862 wr->num_sge > qp->sq.max_sges) {
f6ddcf71 1863 *bad_wr = wr;
1864 status = -ENOMEM;
1865 break;
1866 }
1867 hdr = ocrdma_hwq_head(&qp->sq);
1868 hdr->cw = 0;
2b51a9b9 1869 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
1870 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1871 if (wr->send_flags & IB_SEND_FENCE)
1872 hdr->cw |=
1873 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
1874 if (wr->send_flags & IB_SEND_SOLICITED)
1875 hdr->cw |=
1876 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
1877 hdr->total_len = 0;
1878 switch (wr->opcode) {
1879 case IB_WR_SEND_WITH_IMM:
1880 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1881 hdr->immdt = ntohl(wr->ex.imm_data);
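			/* fall through */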
1882 case IB_WR_SEND:
1883 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
 1884 status = ocrdma_build_send(qp, hdr, wr);
1885 break;
1886 case IB_WR_SEND_WITH_INV:
1887 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
1888 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1889 hdr->lkey = wr->ex.invalidate_rkey;
1890 status = ocrdma_build_send(qp, hdr, wr);
1891 break;
1892 case IB_WR_RDMA_WRITE_WITH_IMM:
1893 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1894 hdr->immdt = ntohl(wr->ex.imm_data);
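			/* fall through */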
1895 case IB_WR_RDMA_WRITE:
1896 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
1897 status = ocrdma_build_write(qp, hdr, wr);
1898 break;
1899 case IB_WR_RDMA_READ_WITH_INV:
1900 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
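			/* fall through */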
1901 case IB_WR_RDMA_READ:
1902 ocrdma_build_read(qp, hdr, wr);
1903 break;
1904 case IB_WR_LOCAL_INV:
1905 hdr->cw |=
1906 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
1907 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
1908 sizeof(struct ocrdma_sge)) /
1909 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
1910 hdr->lkey = wr->ex.invalidate_rkey;
1911 break;
1912 case IB_WR_FAST_REG_MR:
1913 status = ocrdma_build_fr(qp, hdr, wr);
1914 break;
1915 default:
1916 status = -EINVAL;
1917 break;
1918 }
1919 if (status) {
1920 *bad_wr = wr;
1921 break;
1922 }
2b51a9b9 1923 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
1924 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
1925 else
1926 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
1927 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
1928 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
1929 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
1930 /* make sure wqe is written before adapter can access it */
1931 wmb();
1932 /* inform hw to start processing it */
1933 ocrdma_ring_sq_db(qp);
1934
1935 /* update pointer, counter for next wr */
1936 ocrdma_hwq_inc_head(&qp->sq);
1937 wr = wr->next;
1938 }
1939 spin_unlock_irqrestore(&qp->q_lock, flags);
1940 return status;
1941}
1942
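/* Ring the RQ doorbell for one newly posted RQE; while the QP is still
 * in the INIT state the doorbell write is skipped and only counted in
 * db_cache.
 */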
1943static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1944{
df176ea0 1945 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
fe2caefc 1946
1947 if (qp->state != OCRDMA_QPS_INIT)
1948 iowrite32(val, qp->rq_db);
1949 else
1950 qp->db_cache++;
1951}
1952
1953static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
1954 u16 tag)
1955{
1956 u32 wqe_size = 0;
1957 struct ocrdma_sge *sge;
1958 if (wr->num_sge)
1959 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
1960 else
1961 wqe_size = sizeof(*sge) + sizeof(*rqe);
1962
1963 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
1964 OCRDMA_WQE_SIZE_SHIFT);
1965 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1966 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1967 rqe->total_len = 0;
1968 rqe->rsvd_tag = tag;
1969 sge = (struct ocrdma_sge *)(rqe + 1);
1970 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
1971 ocrdma_cpu_to_le32(rqe, wqe_size);
1972}
1973
1974int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1975 struct ib_recv_wr **bad_wr)
1976{
1977 int status = 0;
1978 unsigned long flags;
1979 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1980 struct ocrdma_hdr_wqe *rqe;
1981
1982 spin_lock_irqsave(&qp->q_lock, flags);
1983 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
1984 spin_unlock_irqrestore(&qp->q_lock, flags);
1985 *bad_wr = wr;
1986 return -EINVAL;
1987 }
1988 while (wr) {
1989 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
1990 wr->num_sge > qp->rq.max_sges) {
1991 *bad_wr = wr;
1992 status = -ENOMEM;
1993 break;
1994 }
1995 rqe = ocrdma_hwq_head(&qp->rq);
1996 ocrdma_build_rqe(rqe, wr, 0);
1997
1998 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
1999 /* make sure rqe is written before adapter can access it */
2000 wmb();
2001
2002 /* inform hw to start processing it */
2003 ocrdma_ring_rq_db(qp);
2004
2005 /* update pointer, counter for next wr */
2006 ocrdma_hwq_inc_head(&qp->rq);
2007 wr = wr->next;
2008 }
2009 spin_unlock_irqrestore(&qp->q_lock, flags);
2010 return status;
2011}
2012
2013/* CQEs for an SRQ's RQEs can arrive out of order. The index gives
2014 * the entry in the shadow table where the wr_id is stored; this
2015 * tag/index is returned in the CQE so a given RQE can be referenced
2016 * back later.
2017 */
2018static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2019{
2020 int row = 0;
2021 int indx = 0;
2022
2023 for (row = 0; row < srq->bit_fields_len; row++) {
2024 if (srq->idx_bit_fields[row]) {
2025 indx = ffs(srq->idx_bit_fields[row]);
2026 indx = (row * 32) + (indx - 1);
2027 if (indx >= srq->rq.max_cnt)
2028 BUG();
2029 ocrdma_srq_toggle_bit(srq, indx);
2030 break;
2031 }
2032 }
2033
2034 if (row == srq->bit_fields_len)
2035 BUG();
2036 return indx;
2037}
2038
2039static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2040{
2041 u32 val = srq->rq.dbid | (1 << 16);
2042
2043 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2044}
2045
2046int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2047 struct ib_recv_wr **bad_wr)
2048{
2049 int status = 0;
2050 unsigned long flags;
2051 struct ocrdma_srq *srq;
2052 struct ocrdma_hdr_wqe *rqe;
2053 u16 tag;
2054
2055 srq = get_ocrdma_srq(ibsrq);
2056
2057 spin_lock_irqsave(&srq->q_lock, flags);
2058 while (wr) {
2059 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2060 wr->num_sge > srq->rq.max_sges) {
2061 status = -ENOMEM;
2062 *bad_wr = wr;
2063 break;
2064 }
2065 tag = ocrdma_srq_get_idx(srq);
2066 rqe = ocrdma_hwq_head(&srq->rq);
2067 ocrdma_build_rqe(rqe, wr, tag);
2068
2069 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2070 /* make sure rqe is written before adapter can perform DMA */
2071 wmb();
2072 /* inform hw to start processing it */
2073 ocrdma_ring_srq_db(srq);
2074 /* update pointer, counter for next wr */
2075 ocrdma_hwq_inc_head(&srq->rq);
2076 wr = wr->next;
2077 }
2078 spin_unlock_irqrestore(&srq->q_lock, flags);
2079 return status;
2080}
2081
2082static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2083{
f99b1649 2084 enum ib_wc_status ibwc_status;
2085
2086 switch (status) {
2087 case OCRDMA_CQE_GENERAL_ERR:
2088 ibwc_status = IB_WC_GENERAL_ERR;
2089 break;
2090 case OCRDMA_CQE_LOC_LEN_ERR:
2091 ibwc_status = IB_WC_LOC_LEN_ERR;
2092 break;
2093 case OCRDMA_CQE_LOC_QP_OP_ERR:
2094 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2095 break;
2096 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2097 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2098 break;
2099 case OCRDMA_CQE_LOC_PROT_ERR:
2100 ibwc_status = IB_WC_LOC_PROT_ERR;
2101 break;
2102 case OCRDMA_CQE_WR_FLUSH_ERR:
2103 ibwc_status = IB_WC_WR_FLUSH_ERR;
2104 break;
2105 case OCRDMA_CQE_MW_BIND_ERR:
2106 ibwc_status = IB_WC_MW_BIND_ERR;
2107 break;
2108 case OCRDMA_CQE_BAD_RESP_ERR:
2109 ibwc_status = IB_WC_BAD_RESP_ERR;
2110 break;
2111 case OCRDMA_CQE_LOC_ACCESS_ERR:
2112 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2113 break;
2114 case OCRDMA_CQE_REM_INV_REQ_ERR:
2115 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2116 break;
2117 case OCRDMA_CQE_REM_ACCESS_ERR:
2118 ibwc_status = IB_WC_REM_ACCESS_ERR;
2119 break;
2120 case OCRDMA_CQE_REM_OP_ERR:
2121 ibwc_status = IB_WC_REM_OP_ERR;
2122 break;
2123 case OCRDMA_CQE_RETRY_EXC_ERR:
2124 ibwc_status = IB_WC_RETRY_EXC_ERR;
2125 break;
2126 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2127 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2128 break;
2129 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2130 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2131 break;
2132 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2133 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2134 break;
2135 case OCRDMA_CQE_REM_ABORT_ERR:
2136 ibwc_status = IB_WC_REM_ABORT_ERR;
2137 break;
2138 case OCRDMA_CQE_INV_EECN_ERR:
2139 ibwc_status = IB_WC_INV_EECN_ERR;
2140 break;
2141 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2142 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2143 break;
2144 case OCRDMA_CQE_FATAL_ERR:
2145 ibwc_status = IB_WC_FATAL_ERR;
2146 break;
2147 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2148 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2149 break;
2150 default:
2151 ibwc_status = IB_WC_GENERAL_ERR;
2152 break;
 2153 }
2154 return ibwc_status;
2155}
2156
2157static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2158 u32 wqe_idx)
2159{
2160 struct ocrdma_hdr_wqe *hdr;
2161 struct ocrdma_sge *rw;
2162 int opcode;
2163
2164 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2165
2166 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2167 /* Undo the hdr->cw swap */
2168 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2169 switch (opcode) {
2170 case OCRDMA_WRITE:
2171 ibwc->opcode = IB_WC_RDMA_WRITE;
2172 break;
2173 case OCRDMA_READ:
2174 rw = (struct ocrdma_sge *)(hdr + 1);
2175 ibwc->opcode = IB_WC_RDMA_READ;
2176 ibwc->byte_len = rw->len;
2177 break;
2178 case OCRDMA_SEND:
2179 ibwc->opcode = IB_WC_SEND;
2180 break;
2181 case OCRDMA_FR_MR:
2182 ibwc->opcode = IB_WC_FAST_REG_MR;
2183 break;
2184 case OCRDMA_LKEY_INV:
2185 ibwc->opcode = IB_WC_LOCAL_INV;
2186 break;
2187 default:
2188 ibwc->status = IB_WC_GENERAL_ERR;
2189 pr_err("%s() invalid opcode received = 0x%x\n",
2190 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2191 break;
 2192 }
2193}
2194
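/* Overwrite the status field of a CQE (SQ, RQ or UD RQ variant) with
 * WR_FLUSH_ERR so that re-reported completions show up as flushed.
 */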
2195static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2196 struct ocrdma_cqe *cqe)
2197{
2198 if (is_cqe_for_sq(cqe)) {
2199 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2200 cqe->flags_status_srcqpn) &
2201 ~OCRDMA_CQE_STATUS_MASK);
2202 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2203 cqe->flags_status_srcqpn) |
2204 (OCRDMA_CQE_WR_FLUSH_ERR <<
2205 OCRDMA_CQE_STATUS_SHIFT));
2206 } else {
2207 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2208 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2209 cqe->flags_status_srcqpn) &
2210 ~OCRDMA_CQE_UD_STATUS_MASK);
2211 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2212 cqe->flags_status_srcqpn) |
2213 (OCRDMA_CQE_WR_FLUSH_ERR <<
2214 OCRDMA_CQE_UD_STATUS_SHIFT));
2215 } else {
2216 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2217 cqe->flags_status_srcqpn) &
2218 ~OCRDMA_CQE_STATUS_MASK);
2219 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2220 cqe->flags_status_srcqpn) |
2221 (OCRDMA_CQE_WR_FLUSH_ERR <<
2222 OCRDMA_CQE_STATUS_SHIFT));
2223 }
2224 }
2225}
2226
2227static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2228 struct ocrdma_qp *qp, int status)
2229{
2230 bool expand = false;
2231
2232 ibwc->byte_len = 0;
2233 ibwc->qp = &qp->ibqp;
2234 ibwc->status = ocrdma_to_ibwc_err(status);
2235
2236 ocrdma_flush_qp(qp);
057729cb 2237 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2238
2239 /* if wqe/rqe pending for which cqe needs to be returned,
2240 * trigger inflating it.
2241 */
2242 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2243 expand = true;
2244 ocrdma_set_cqe_status_flushed(qp, cqe);
2245 }
2246 return expand;
2247}
2248
2249static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2250 struct ocrdma_qp *qp, int status)
2251{
2252 ibwc->opcode = IB_WC_RECV;
2253 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2254 ocrdma_hwq_inc_tail(&qp->rq);
2255
2256 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2257}
2258
2259static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2260 struct ocrdma_qp *qp, int status)
2261{
2262 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2263 ocrdma_hwq_inc_tail(&qp->sq);
2264
2265 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2266}
2267
2268
2269static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2270 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2271 bool *polled, bool *stop)
2272{
2273 bool expand;
2274 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2275 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2276
 2277 /* the hw SQ is empty but the RQ is not, so keep the CQE
 2278 * so that the CQ event is raised again.
 2279 */
 2280 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
 2281 /* when the SQ and RQ share the same CQ, it is safe to
 2282 * return flush CQEs for the RQEs.
 2283 */
2284 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2285 *polled = true;
2286 status = OCRDMA_CQE_WR_FLUSH_ERR;
2287 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2288 } else {
 2289 /* stop processing further CQEs, as this CQE is used to
 2290 * trigger the CQ event on the RQ's buddy CQ.
 2291 * When the QP is destroyed, this CQE will be removed
 2292 * from the CQ's hardware queue.
 2293 */
2294 *polled = false;
2295 *stop = true;
2296 expand = false;
2297 }
2298 } else {
2299 *polled = true;
2300 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2301 }
2302 return expand;
2303}
2304
2305static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2306 struct ocrdma_cqe *cqe,
2307 struct ib_wc *ibwc, bool *polled)
2308{
2309 bool expand = false;
2310 int tail = qp->sq.tail;
2311 u32 wqe_idx;
2312
2313 if (!qp->wqe_wr_id_tbl[tail].signaled) {
2314 *polled = false; /* WC cannot be consumed yet */
2315 } else {
2316 ibwc->status = IB_WC_SUCCESS;
2317 ibwc->wc_flags = 0;
2318 ibwc->qp = &qp->ibqp;
2319 ocrdma_update_wc(qp, ibwc, tail);
2320 *polled = true;
fe2caefc 2321 }
2322 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2323 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2324 if (tail != wqe_idx)
2325 expand = true; /* Coalesced CQE can't be consumed yet */
2326
2327 ocrdma_hwq_inc_tail(&qp->sq);
2328 return expand;
2329}
2330
2331static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2332 struct ib_wc *ibwc, bool *polled, bool *stop)
2333{
2334 int status;
2335 bool expand;
2336
2337 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2338 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2339
2340 if (status == OCRDMA_CQE_SUCCESS)
2341 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2342 else
2343 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2344 return expand;
2345}
2346
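/* Fill the UD-specific receive completion fields (source QP, pkey
 * index, GRH flag, transfer length) and return the UD status taken
 * from the CQE.
 */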
2347static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2348{
2349 int status;
2350
2351 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2352 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2353 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2354 OCRDMA_CQE_SRCQP_MASK;
2355 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2356 OCRDMA_CQE_PKEY_MASK;
2357 ibwc->wc_flags = IB_WC_GRH;
2358 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2359 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2360 return status;
2361}
2362
2363static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2364 struct ocrdma_cqe *cqe,
2365 struct ocrdma_qp *qp)
2366{
2367 unsigned long flags;
2368 struct ocrdma_srq *srq;
2369 u32 wqe_idx;
2370
2371 srq = get_ocrdma_srq(qp->ibqp.srq);
2372 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2373 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2374 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2375 spin_lock_irqsave(&srq->q_lock, flags);
2376 ocrdma_srq_toggle_bit(srq, wqe_idx);
2377 spin_unlock_irqrestore(&srq->q_lock, flags);
2378 ocrdma_hwq_inc_tail(&srq->rq);
2379}
2380
2381static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2382 struct ib_wc *ibwc, bool *polled, bool *stop,
2383 int status)
2384{
2385 bool expand;
2386
 2387 /* the hw RQ is empty but the SQ is not, so keep the CQE
 2388 * so that the CQ event is raised again.
 2389 */
2390 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2391 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2392 *polled = true;
2393 status = OCRDMA_CQE_WR_FLUSH_ERR;
2394 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2395 } else {
2396 *polled = false;
2397 *stop = true;
2398 expand = false;
2399 }
2400 } else {
2401 *polled = true;
fe2caefc 2402 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
a3698a9b 2403 }
2404 return expand;
2405}
2406
2407static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2408 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2409{
2410 ibwc->opcode = IB_WC_RECV;
2411 ibwc->qp = &qp->ibqp;
2412 ibwc->status = IB_WC_SUCCESS;
2413
2414 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2415 ocrdma_update_ud_rcqe(ibwc, cqe);
2416 else
2417 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2418
2419 if (is_cqe_imm(cqe)) {
2420 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2421 ibwc->wc_flags |= IB_WC_WITH_IMM;
2422 } else if (is_cqe_wr_imm(cqe)) {
2423 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2424 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2425 ibwc->wc_flags |= IB_WC_WITH_IMM;
2426 } else if (is_cqe_invalidated(cqe)) {
2427 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2428 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2429 }
f99b1649 2430 if (qp->ibqp.srq) {
fe2caefc 2431 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
f99b1649 2432 } else {
2433 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2434 ocrdma_hwq_inc_tail(&qp->rq);
2435 }
2436}
2437
2438static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2439 struct ib_wc *ibwc, bool *polled, bool *stop)
2440{
2441 int status;
2442 bool expand = false;
2443
2444 ibwc->wc_flags = 0;
f99b1649 2445 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2446 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2447 OCRDMA_CQE_UD_STATUS_MASK) >>
2448 OCRDMA_CQE_UD_STATUS_SHIFT;
f99b1649 2449 } else {
2450 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2451 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
f99b1649 2452 }
2453
2454 if (status == OCRDMA_CQE_SUCCESS) {
2455 *polled = true;
2456 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2457 } else {
2458 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2459 status);
2460 }
2461 return expand;
2462}
2463
2464static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2465 u16 cur_getp)
2466{
2467 if (cq->phase_change) {
2468 if (cur_getp == 0)
2469 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
f99b1649 2470 } else {
2471 /* clear valid bit */
2472 cqe->flags_status_srcqpn = 0;
f99b1649 2473 }
2474}
2475
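/* Walk the hardware CQ from the current get pointer, translating valid
 * CQEs into work completions. "expand" re-polls the same CQE so that
 * flushed WQEs/RQEs can be reported; "stop" leaves the CQE in place for
 * the buddy CQ. The CQ doorbell is updated with the number of hardware
 * CQEs consumed.
 */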
2476static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2477 struct ib_wc *ibwc)
2478{
2479 u16 qpn = 0;
2480 int i = 0;
2481 bool expand = false;
2482 int polled_hw_cqes = 0;
2483 struct ocrdma_qp *qp = NULL;
1afc0454 2484 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2485 struct ocrdma_cqe *cqe;
2486 u16 cur_getp; bool polled = false; bool stop = false;
2487
2488 cur_getp = cq->getp;
2489 while (num_entries) {
2490 cqe = cq->va + cur_getp;
2491 /* check whether valid cqe or not */
2492 if (!is_cqe_valid(cq, cqe))
2493 break;
2494 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2495 /* ignore discarded cqe */
2496 if (qpn == 0)
2497 goto skip_cqe;
2498 qp = dev->qp_tbl[qpn];
2499 BUG_ON(qp == NULL);
2500
2501 if (is_cqe_for_sq(cqe)) {
2502 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2503 &stop);
2504 } else {
2505 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2506 &stop);
2507 }
2508 if (expand)
2509 goto expand_cqe;
2510 if (stop)
2511 goto stop_cqe;
2512 /* clear qpn to avoid duplicate processing by discard_cqe() */
2513 cqe->cmn.qpn = 0;
2514skip_cqe:
2515 polled_hw_cqes += 1;
2516 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2517 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2518expand_cqe:
2519 if (polled) {
2520 num_entries -= 1;
2521 i += 1;
2522 ibwc = ibwc + 1;
2523 polled = false;
2524 }
2525 }
2526stop_cqe:
2527 cq->getp = cur_getp;
2528 if (polled_hw_cqes || expand || stop) {
2529 ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2530 polled_hw_cqes);
2531 }
2532 return i;
2533}
2534
2535/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2536static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2537 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2538{
2539 int err_cqes = 0;
2540
2541 while (num_entries) {
2542 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2543 break;
2544 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2545 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2546 ocrdma_hwq_inc_tail(&qp->sq);
2547 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2548 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2549 ocrdma_hwq_inc_tail(&qp->rq);
f99b1649 2550 } else {
fe2caefc 2551 return err_cqes;
f99b1649 2552 }
2553 ibwc->byte_len = 0;
2554 ibwc->status = IB_WC_WR_FLUSH_ERR;
2555 ibwc = ibwc + 1;
2556 err_cqes += 1;
2557 num_entries -= 1;
2558 }
2559 return err_cqes;
2560}
2561
2562int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2563{
2564 int cqes_to_poll = num_entries;
2565 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2566 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2567 int num_os_cqe = 0, err_cqes = 0;
2568 struct ocrdma_qp *qp;
1afc0454 2569 unsigned long flags;
2570
2571 /* poll cqes from adapter CQ */
2572 spin_lock_irqsave(&cq->cq_lock, flags);
2573 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2574 spin_unlock_irqrestore(&cq->cq_lock, flags);
2575 cqes_to_poll -= num_os_cqe;
2576
2577 if (cqes_to_poll) {
2578 wc = wc + num_os_cqe;
 2579 /* The adapter returns a single error CQE when a QP moves to
 2580 * the error state, so insert error CQEs with wc_status
 2581 * FLUSHED for the pending WQEs and RQEs of each QP whose SQ
 2582 * or RQ uses this CQ.
 2583 */
2584 spin_lock_irqsave(&dev->flush_q_lock, flags);
2585 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2586 if (cqes_to_poll == 0)
2587 break;
2588 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2589 cqes_to_poll -= err_cqes;
2590 num_os_cqe += err_cqes;
2591 wc = wc + err_cqes;
2592 }
2593 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2594 }
2595 return num_os_cqe;
2596}
2597
2598int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2599{
2600 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2601 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2602 u16 cq_id;
2603 u16 cur_getp;
2604 struct ocrdma_cqe *cqe;
1afc0454 2605 unsigned long flags;
fe2caefc 2606
fe2caefc 2607 cq_id = cq->id;
2608
2609 spin_lock_irqsave(&cq->cq_lock, flags);
2610 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2611 cq->armed = true;
2612 if (cq_flags & IB_CQ_SOLICITED)
2613 cq->solicited = true;
2614
2615 cur_getp = cq->getp;
2616 cqe = cq->va + cur_getp;
2617
 2618 /* check whether any valid CQE exists; if not, it is safe to
 2619 * arm. If a CQE has not yet been consumed, let it be consumed
 2620 * first and arm afterwards to avoid false interrupts.
 2621 */
2622 if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2623 cq->arm_needed = false;
2624 ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2625 }
2626 spin_unlock_irqrestore(&cq->cq_lock, flags);
2627 return 0;
2628}
2629
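/* Allocate a fast-register MR: reserve PBL space for up to
 * max_page_list_len pages, register the MR with the adapter and stash
 * it in stag_arr so that ocrdma_build_fr() can look it up by key later.
 */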
2630struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2631{
2632 int status;
2633 struct ocrdma_mr *mr;
2634 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2635 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2636
2637 if (max_page_list_len > dev->attr.max_pages_per_frmr)
2638 return ERR_PTR(-EINVAL);
2639
2640 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2641 if (!mr)
2642 return ERR_PTR(-ENOMEM);
2643
2644 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2645 if (status)
2646 goto pbl_err;
2647 mr->hwmr.fr_mr = 1;
2648 mr->hwmr.remote_rd = 0;
2649 mr->hwmr.remote_wr = 0;
2650 mr->hwmr.local_rd = 0;
2651 mr->hwmr.local_wr = 0;
2652 mr->hwmr.mw_bind = 0;
2653 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2654 if (status)
2655 goto pbl_err;
2656 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2657 if (status)
2658 goto mbx_err;
2659 mr->ibmr.rkey = mr->hwmr.lkey;
2660 mr->ibmr.lkey = mr->hwmr.lkey;
2661 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64) mr;
2662 return &mr->ibmr;
2663mbx_err:
2664 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2665pbl_err:
2666 kfree(mr);
2667 return ERR_PTR(-ENOMEM);
2668}
2669
2670struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2671 *ibdev,
2672 int page_list_len)
2673{
2674 struct ib_fast_reg_page_list *frmr_list;
2675 int size;
2676
2677 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2678 frmr_list = kzalloc(size, GFP_KERNEL);
2679 if (!frmr_list)
2680 return ERR_PTR(-ENOMEM);
2681 frmr_list->page_list = (u64 *)(frmr_list + 1);
2682 return frmr_list;
2683}
2684
2685void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2686{
2687 kfree(page_list);
2688}