/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_SHUTDOWN_PORT |
				 IB_DEVICE_SYS_IMAGE_GUID |
				 IB_DEVICE_LOCAL_DMA_LKEY |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_send_sge = dev->attr.max_send_sge;
	attr->max_recv_sge = dev->attr.max_recv_sge;
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}
struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct ocrdma_dev *dev;
	struct net_device *ndev = NULL;

	rcu_read_lock();

	dev = get_ocrdma_dev(ibdev);
	if (dev)
		ndev = dev->nic_info.netdev;
	if (ndev)
		dev_hold(ndev);

	rcu_read_unlock();

	return ndev;
}
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}
int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	dev = get_ocrdma_dev(ibdev);
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	return 0;
}
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}
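
/* Preallocated protection-domain (PD) manager: when firmware hands the
 * driver a PD range up front (pd_prealloc_valid), PD ids are taken from
 * driver-side bitmaps instead of per-PD mailbox commands. There are two
 * pools, DPP (direct packet push) PDs and normal PDs; the *_thrsh fields
 * record the high-water mark of each pool.
 */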
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}

	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}
static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = 0;
	u8 pd_idx = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}
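
/* Allocate the driver PD object. For user-space PDs on SkyHawk-R, DPP is
 * enabled so small WQEs can be written straight to a dedicated doorbell
 * page; if the firmware rejects a DPP allocation, the mailbox allocation
 * below is retried with DPP disabled.
 */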
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid) {
		status = ocrdma_get_pd_num(dev, pd);
		if (status == 0) {
			return pd;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		}
		kfree(pd);
		return ERR_PTR(status);
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status;

	if (dev->pd_mgr->pd_prealloc_valid)
		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		status = ocrdma_mbx_dealloc_pd(dev, pd);

	kfree(pd);
	return status;
}
static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	(void)_ocrdma_dealloc_pd(dev, pd);
	return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}
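
/* Per-process context setup: a DMA-coherent address-handle (AH) table is
 * allocated and advertised to user space through the alloc_ucontext
 * response; every address user space may later mmap() is recorded on the
 * context's mm_head list.
 */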
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;
	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
	ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}
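
/* mmap() handler: only offsets previously registered on the ucontext's
 * mm_head list are allowed. Doorbell pages are mapped non-cached, DPP
 * windows write-combined, and queue memory with default attributes.
 */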
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		if (_ocrdma_dealloc_pd(dev, pd))
			pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return 0;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}
static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}
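
/* Choose the smallest PBL (page buffer list) size, doubling up from
 * OCRDMA_MIN_HPAGE_SIZE, such that the number of PBLs needed to hold
 * num_pbes 64-bit page entries stays below the device's max_num_mr_pbl.
 */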
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}
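
/* Walk the umem scatterlist and write one little-endian PBE (page
 * address) per hardware page into the PBL table, moving to the next PBL
 * page whenever the current one fills up.
 */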
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = umem->page_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(pg_cnt << shift));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
					(pg_cnt << shift)));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt ==
				(mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}
*ocrdma_reg_user_mr(struct ib_pd
*ibpd
, u64 start
, u64 len
,
903 u64 usr_addr
, int acc
, struct ib_udata
*udata
)
905 int status
= -ENOMEM
;
906 struct ocrdma_dev
*dev
= get_ocrdma_dev(ibpd
->device
);
907 struct ocrdma_mr
*mr
;
908 struct ocrdma_pd
*pd
;
911 pd
= get_ocrdma_pd(ibpd
);
913 if (acc
& IB_ACCESS_REMOTE_WRITE
&& !(acc
& IB_ACCESS_LOCAL_WRITE
))
914 return ERR_PTR(-EINVAL
);
916 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
918 return ERR_PTR(status
);
919 mr
->umem
= ib_umem_get(ibpd
->uobject
->context
, start
, len
, acc
, 0);
920 if (IS_ERR(mr
->umem
)) {
924 num_pbes
= ib_umem_page_count(mr
->umem
);
925 status
= ocrdma_get_pbl_info(dev
, mr
, num_pbes
);
929 mr
->hwmr
.pbe_size
= BIT(mr
->umem
->page_shift
);
930 mr
->hwmr
.fbo
= ib_umem_offset(mr
->umem
);
931 mr
->hwmr
.va
= usr_addr
;
933 mr
->hwmr
.remote_wr
= (acc
& IB_ACCESS_REMOTE_WRITE
) ? 1 : 0;
934 mr
->hwmr
.remote_rd
= (acc
& IB_ACCESS_REMOTE_READ
) ? 1 : 0;
935 mr
->hwmr
.local_wr
= (acc
& IB_ACCESS_LOCAL_WRITE
) ? 1 : 0;
936 mr
->hwmr
.local_rd
= 1;
937 mr
->hwmr
.remote_atomic
= (acc
& IB_ACCESS_REMOTE_ATOMIC
) ? 1 : 0;
938 status
= ocrdma_build_pbl_tbl(dev
, &mr
->hwmr
);
941 build_user_pbes(dev
, mr
, num_pbes
);
942 status
= ocrdma_reg_mr(dev
, &mr
->hwmr
, pd
->id
, acc
);
945 mr
->ibmr
.lkey
= mr
->hwmr
.lkey
;
946 if (mr
->hwmr
.remote_wr
|| mr
->hwmr
.remote_rd
)
947 mr
->ibmr
.rkey
= mr
->hwmr
.lkey
;
952 ocrdma_free_mr_pbl_tbl(dev
, &mr
->hwmr
);
955 return ERR_PTR(status
);
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	kfree(mr->pages);
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		goto err;
	}
	ibcq->cqe = new_cnt;
err:
	return status;
}
*cq
)
1089 int valid_count
= 0;
1090 unsigned long flags
;
1092 struct ocrdma_dev
*dev
= get_ocrdma_dev(cq
->ibcq
.device
);
1093 struct ocrdma_cqe
*cqe
= NULL
;
1096 cqe_cnt
= cq
->cqe_cnt
;
1098 /* Last irq might have scheduled a polling thread
1099 * sync-up with it before hard flushing.
1101 spin_lock_irqsave(&cq
->cq_lock
, flags
);
1103 if (is_cqe_valid(cq
, cqe
))
1108 ocrdma_ring_cq_db(dev
, cq
->id
, false, false, valid_count
);
1109 spin_unlock_irqrestore(&cq
->cq_lock
, flags
);
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	BUG_ON(indx == -EINVAL);

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	(void)ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return 0;
}
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}
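
/* Validate ib_qp_init_attr against device limits before creating a QP;
 * each rejected attribute is logged together with the supported maximum.
 */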
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs,
				  struct ib_udata *udata)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (udata && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}
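
/* Each PD owns one doorbell page; the SQ and RQ doorbells sit at
 * generation-specific offsets inside that page (GEN2 offsets on
 * SkyHawk-R, legacy offsets otherwise).
 */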
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if new and previous states are the same, hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);

param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}
ocrdma_mtu_int_to_enum(u16 mtu
)
1506 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags
)
1508 int ib_qp_acc_flags
= 0;
1510 if (qp_cap_flags
& OCRDMA_QP_INB_WR
)
1511 ib_qp_acc_flags
|= IB_ACCESS_REMOTE_WRITE
;
1512 if (qp_cap_flags
& OCRDMA_QP_INB_RD
)
1513 ib_qp_acc_flags
|= IB_ACCESS_LOCAL_WRITE
;
1514 return ib_qp_acc_flags
;
int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.rnt_rc_sl_fl &
			  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
			0,
			(params.hop_lmt_rq_psn &
			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
			(params.tclass_sq_psn &
			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);

	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
					   OCRDMA_QP_PARAMS_SL_MASK) >>
					   OCRDMA_QP_PARAMS_SL_SHIFT);
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
	/* Sync driver QP state with FW */
	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
	return status;
}
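
/* Hardware work-queue ring helpers: head and tail wrap at max_wqe_idx,
 * a queue is empty when head == tail, and the free-entry count is
 * ((max_wqe_idx - head) + tail) % max_cnt.
 */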
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
	unsigned int i = idx / 32;
	u32 mask = (1U << (idx % 32));

	srq->idx_bit_fields[i] ^= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}
/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp's sq/rq becomes empty,
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					   OCRDMA_CQE_BUFTAG_SHIFT) &
					   qp->srq->rq.max_wqe_idx;
				BUG_ON(wqe_idx < 1);
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}
void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}
int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	pd = qp->pd;

	/* change the QP state to ERROR */
	if (qp->state != OCRDMA_QPS_RST) {
		attrs.qp_state = IB_QPS_ERR;
		attr_mask = IB_QP_STATE;
		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
	}
	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as one just getting destroyed) don't get discarded until the old
	 * CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	(void) ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
		spin_lock(&qp->rq_cq->cq_lock);
		ocrdma_del_qpn_map(dev, qp);
		spin_unlock(&qp->rq_cq->cq_lock);
	} else {
		ocrdma_del_qpn_map(dev, qp);
	}
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return 0;
}
static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
	    (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	if (status)
		return status;
	return status;
}
struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		status = -ENOMEM;
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
			goto arm_err;

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				const struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
	ud_hdr->hdr_type = ah->hdr_type;
	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}
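
/* For IB_SEND_INLINE on non-UD QPs the payload is copied directly into
 * the WQE (bounded by max_inline_data); otherwise a normal SGE list is
 * built. Either way the WQE size, in OCRDMA_WQE_STRIDE units, is encoded
 * into the control word.
 */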
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    const struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (0 == hdr->total_len)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		       sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
}
static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256M 4096 << 16 */
	int i = 0;
	for (; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}

static int ocrdma_build_reg(struct ocrdma_qp *qp,
			    struct ocrdma_hdr_wqe *hdr,
			    const struct ib_reg_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ocrdma_pbe *pbe;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
	int num_pbes = 0, i;

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->access & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->access & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->access & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->key;
	hdr->total_len = mr->ibmr.length;

	fbo = mr->ibmr.iova - mr->pages[0];

	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = mr->npages;
	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);

	pbe = pbl_tbl->va;
	for (i = 0; i < mr->npages; i++) {
		u64 buf_addr = mr->pages[i];

		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the pbl is full storing the pbes,
		 * move to next pbl.
		 */
		if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
		}
	}

	return 0;
}
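
/* Posting a send WR: build the WQE at the SQ head, record the wr_id and
 * signaled bit in the shadow table, convert the WQE to little endian,
 * order the writes with wmb(), then ring the SQ doorbell and advance the
 * head.
 */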
2150 static void ocrdma_ring_sq_db(struct ocrdma_qp
*qp
)
2152 u32 val
= qp
->sq
.dbid
| (1 << OCRDMA_DB_SQ_SHIFT
);
2154 iowrite32(val
, qp
->sq_db
);
int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                     const struct ib_send_wr **bad_wr)
{
        int status = 0;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
        struct ocrdma_hdr_wqe *hdr;
        unsigned long flags;

        spin_lock_irqsave(&qp->q_lock, flags);
        if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        while (wr) {
                /* UD QPs only support SEND and SEND_WITH_IMM */
                if (qp->qp_type == IB_QPT_UD &&
                    (wr->opcode != IB_WR_SEND &&
                     wr->opcode != IB_WR_SEND_WITH_IMM)) {
                        *bad_wr = wr;
                        status = -EINVAL;
                        break;
                }
                if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
                    wr->num_sge > qp->sq.max_sges) {
                        *bad_wr = wr;
                        status = -ENOMEM;
                        break;
                }
                hdr = ocrdma_hwq_head(&qp->sq);
                hdr->cw = 0;
                if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
                        hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
                if (wr->send_flags & IB_SEND_FENCE)
                        hdr->cw |=
                            (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
                if (wr->send_flags & IB_SEND_SOLICITED)
                        hdr->cw |=
                            (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
                hdr->total_len = 0;
                switch (wr->opcode) {
                case IB_WR_SEND_WITH_IMM:
                        hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
                        hdr->immdt = ntohl(wr->ex.imm_data);
                        /* fall through */
                case IB_WR_SEND:
                        hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
                        ocrdma_build_send(qp, hdr, wr);
                        break;
                case IB_WR_SEND_WITH_INV:
                        hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
                        hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
                        hdr->lkey = wr->ex.invalidate_rkey;
                        status = ocrdma_build_send(qp, hdr, wr);
                        break;
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
                        hdr->immdt = ntohl(wr->ex.imm_data);
                        /* fall through */
                case IB_WR_RDMA_WRITE:
                        hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_write(qp, hdr, wr);
                        break;
                case IB_WR_RDMA_READ:
                        ocrdma_build_read(qp, hdr, wr);
                        break;
                case IB_WR_LOCAL_INV:
                        hdr->cw |=
                            (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
                        hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
                                        sizeof(struct ocrdma_sge)) /
                                OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
                        hdr->lkey = wr->ex.invalidate_rkey;
                        break;
                case IB_WR_REG_MR:
                        status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
                        break;
                default:
                        status = -EINVAL;
                        break;
                }
                if (status) {
                        *bad_wr = wr;
                        break;
                }
                if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
                        qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
                else
                        qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
                qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
                ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
                                        OCRDMA_WQE_SIZE_MASK) *
                                        OCRDMA_WQE_STRIDE);
                /* make sure wqe is written before adapter can access it */
                wmb();
                /* inform hw to start processing it */
                ocrdma_ring_sq_db(qp);

                /* update pointer, counter for next wr */
                ocrdma_hwq_inc_head(&qp->sq);
                wr = wr->next;
        }
        spin_unlock_irqrestore(&qp->q_lock, flags);
        return status;
}

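/*
 * Illustrative only (not driver code): a caller reaches the SQ above via
 * the core verbs; "dma_addr", "len" and "cookie" are placeholders.
 *
 *      struct ib_sge sge = { .addr = dma_addr, .length = len,
 *                            .lkey = mr->lkey };
 *      struct ib_send_wr swr = { .opcode = IB_WR_SEND,
 *                                .send_flags = IB_SEND_SIGNALED,
 *                                .sg_list = &sge, .num_sge = 1,
 *                                .wr_id = cookie };
 *      const struct ib_send_wr *bad_wr;
 *      int rc = ib_post_send(qp, &swr, &bad_wr);
 *
 * On error, *bad_wr points at the first WR that was not posted; the WRs
 * ahead of it were accepted and will complete normally.
 */
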
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
        u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

        iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
                             const struct ib_recv_wr *wr, u16 tag)
{
        u32 wqe_size = 0;
        struct ocrdma_sge *sge;

        if (wr->num_sge)
                wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
        else
                wqe_size = sizeof(*sge) + sizeof(*rqe);

        rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
                                OCRDMA_WQE_SIZE_SHIFT);
        rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
        rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
        rqe->total_len = 0;
        rqe->rsvd_tag = tag;
        sge = (struct ocrdma_sge *)(rqe + 1);
        ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
        ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                     const struct ib_recv_wr **bad_wr)
{
        int status = 0;
        unsigned long flags;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
        struct ocrdma_hdr_wqe *rqe;

        spin_lock_irqsave(&qp->q_lock, flags);
        if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }
        while (wr) {
                if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
                    wr->num_sge > qp->rq.max_sges) {
                        *bad_wr = wr;
                        status = -ENOMEM;
                        break;
                }
                rqe = ocrdma_hwq_head(&qp->rq);
                ocrdma_build_rqe(rqe, wr, 0);

                qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
                /* make sure rqe is written before adapter can access it */
                wmb();

                /* inform hw to start processing it */
                ocrdma_ring_rq_db(qp);

                /* update pointer, counter for next wr */
                ocrdma_hwq_inc_head(&qp->rq);
                wr = wr->next;
        }
        spin_unlock_irqrestore(&qp->q_lock, flags);
        return status;
}

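/* Note: an ordinary RQ completes in order, so ocrdma_build_rqe() is
 * called with tag 0 and the wr_id is recovered from rqe_wr_id_tbl[] by
 * the rq tail index at poll time. Compare ocrdma_post_srq_recv() below,
 * where a per-RQE tag is required because SRQ completions can arrive
 * out of order.
 */
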
/* cqe for srq's rqe can potentially arrive out of order.
 * index gives the entry in the shadow table where to store
 * the wr_id. tag/index is returned in cqe to reference back
 * for a given rqe.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
        int row = 0;
        int indx = 0;

        for (row = 0; row < srq->bit_fields_len; row++) {
                if (srq->idx_bit_fields[row]) {
                        indx = ffs(srq->idx_bit_fields[row]);
                        indx = (row * 32) + (indx - 1);
                        BUG_ON(indx >= srq->rq.max_cnt);
                        ocrdma_srq_toggle_bit(srq, indx);
                        break;
                }
        }

        BUG_ON(row == srq->bit_fields_len);
        return indx + 1; /* Use from index 1 */
}

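/* Worked example: if idx_bit_fields[0] == 0x5 (bits 0 and 2 free),
 * ffs() returns 1, so indx = (0 * 32) + (1 - 1) = 0; the bit is cleared
 * via ocrdma_srq_toggle_bit() and the caller receives tag 1. The +1
 * keeps index 0 unused, which ocrdma_update_free_srq_cqe() relies on
 * with its BUG_ON(wqe_idx < 1) check.
 */
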
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
        u32 val = srq->rq.dbid | (1 << 16);

        iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                         const struct ib_recv_wr **bad_wr)
{
        int status = 0;
        unsigned long flags;
        struct ocrdma_srq *srq;
        struct ocrdma_hdr_wqe *rqe;
        u16 tag;

        srq = get_ocrdma_srq(ibsrq);

        spin_lock_irqsave(&srq->q_lock, flags);
        while (wr) {
                if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
                    wr->num_sge > srq->rq.max_sges) {
                        status = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                tag = ocrdma_srq_get_idx(srq);
                rqe = ocrdma_hwq_head(&srq->rq);
                ocrdma_build_rqe(rqe, wr, tag);

                srq->rqe_wr_id_tbl[tag] = wr->wr_id;
                /* make sure rqe is written before adapter can perform DMA */
                wmb();
                /* inform hw to start processing it */
                ocrdma_ring_srq_db(srq);
                /* update pointer, counter for next wr */
                ocrdma_hwq_inc_head(&srq->rq);
                wr = wr->next;
        }
        spin_unlock_irqrestore(&srq->q_lock, flags);
        return status;
}

static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
        enum ib_wc_status ibwc_status;

        switch (status) {
        case OCRDMA_CQE_GENERAL_ERR:
                ibwc_status = IB_WC_GENERAL_ERR;
                break;
        case OCRDMA_CQE_LOC_LEN_ERR:
                ibwc_status = IB_WC_LOC_LEN_ERR;
                break;
        case OCRDMA_CQE_LOC_QP_OP_ERR:
                ibwc_status = IB_WC_LOC_QP_OP_ERR;
                break;
        case OCRDMA_CQE_LOC_EEC_OP_ERR:
                ibwc_status = IB_WC_LOC_EEC_OP_ERR;
                break;
        case OCRDMA_CQE_LOC_PROT_ERR:
                ibwc_status = IB_WC_LOC_PROT_ERR;
                break;
        case OCRDMA_CQE_WR_FLUSH_ERR:
                ibwc_status = IB_WC_WR_FLUSH_ERR;
                break;
        case OCRDMA_CQE_MW_BIND_ERR:
                ibwc_status = IB_WC_MW_BIND_ERR;
                break;
        case OCRDMA_CQE_BAD_RESP_ERR:
                ibwc_status = IB_WC_BAD_RESP_ERR;
                break;
        case OCRDMA_CQE_LOC_ACCESS_ERR:
                ibwc_status = IB_WC_LOC_ACCESS_ERR;
                break;
        case OCRDMA_CQE_REM_INV_REQ_ERR:
                ibwc_status = IB_WC_REM_INV_REQ_ERR;
                break;
        case OCRDMA_CQE_REM_ACCESS_ERR:
                ibwc_status = IB_WC_REM_ACCESS_ERR;
                break;
        case OCRDMA_CQE_REM_OP_ERR:
                ibwc_status = IB_WC_REM_OP_ERR;
                break;
        case OCRDMA_CQE_RETRY_EXC_ERR:
                ibwc_status = IB_WC_RETRY_EXC_ERR;
                break;
        case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
                ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
                ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
                break;
        case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
                ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
                break;
        case OCRDMA_CQE_REM_ABORT_ERR:
                ibwc_status = IB_WC_REM_ABORT_ERR;
                break;
        case OCRDMA_CQE_INV_EECN_ERR:
                ibwc_status = IB_WC_INV_EECN_ERR;
                break;
        case OCRDMA_CQE_INV_EEC_STATE_ERR:
                ibwc_status = IB_WC_INV_EEC_STATE_ERR;
                break;
        case OCRDMA_CQE_FATAL_ERR:
                ibwc_status = IB_WC_FATAL_ERR;
                break;
        case OCRDMA_CQE_RESP_TIMEOUT_ERR:
                ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
                break;
        default:
                ibwc_status = IB_WC_GENERAL_ERR;
                break;
        }
        return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
                             u32 wqe_idx)
{
        struct ocrdma_hdr_wqe *hdr;
        struct ocrdma_sge *rw;
        int opcode;

        hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

        ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
        /* Undo the hdr->cw swap */
        opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
        switch (opcode) {
        case OCRDMA_WRITE:
                ibwc->opcode = IB_WC_RDMA_WRITE;
                break;
        case OCRDMA_READ:
                rw = (struct ocrdma_sge *)(hdr + 1);
                ibwc->opcode = IB_WC_RDMA_READ;
                ibwc->byte_len = rw->len;
                break;
        case OCRDMA_SEND:
                ibwc->opcode = IB_WC_SEND;
                break;
        case OCRDMA_FR_MR:
                ibwc->opcode = IB_WC_REG_MR;
                break;
        case OCRDMA_LKEY_INV:
                ibwc->opcode = IB_WC_LOCAL_INV;
                break;
        default:
                ibwc->status = IB_WC_GENERAL_ERR;
                pr_err("%s() invalid opcode received = 0x%x\n",
                       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
                break;
        }
}

static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
                                          struct ocrdma_cqe *cqe)
{
        if (is_cqe_for_sq(cqe)) {
                cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                cqe->flags_status_srcqpn) &
                                        ~OCRDMA_CQE_STATUS_MASK);
                cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                cqe->flags_status_srcqpn) |
                                (OCRDMA_CQE_WR_FLUSH_ERR <<
                                        OCRDMA_CQE_STATUS_SHIFT));
        } else {
                if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
                        cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                        cqe->flags_status_srcqpn) &
                                                ~OCRDMA_CQE_UD_STATUS_MASK);
                        cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                        cqe->flags_status_srcqpn) |
                                        (OCRDMA_CQE_WR_FLUSH_ERR <<
                                                OCRDMA_CQE_UD_STATUS_SHIFT));
                } else {
                        cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                        cqe->flags_status_srcqpn) &
                                                ~OCRDMA_CQE_STATUS_MASK);
                        cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
                                        cqe->flags_status_srcqpn) |
                                        (OCRDMA_CQE_WR_FLUSH_ERR <<
                                                OCRDMA_CQE_STATUS_SHIFT));
                }
        }
}

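/* The CQE is patched in place in its little-endian wire format: the
 * status field (the UD status field for UD/GSI QPs) is cleared and then
 * overwritten with OCRDMA_CQE_WR_FLUSH_ERR, so the next poll of this
 * entry reports a flushed completion.
 */
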
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
                                  struct ocrdma_qp *qp, int status)
{
        bool expand = false;

        ibwc->byte_len = 0;
        ibwc->qp = &qp->ibqp;
        ibwc->status = ocrdma_to_ibwc_err(status);

        ocrdma_flush_qp(qp);
        ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

        /* if wqe/rqe pending for which cqe needs to be returned,
         * trigger inflating it.
         */
        if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
                expand = true;
                ocrdma_set_cqe_status_flushed(qp, cqe);
        }
        return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
                                  struct ocrdma_qp *qp, int status)
{
        ibwc->opcode = IB_WC_RECV;
        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
        ocrdma_hwq_inc_tail(&qp->rq);

        return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
                                  struct ocrdma_qp *qp, int status)
{
        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
        ocrdma_hwq_inc_tail(&qp->sq);

        return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
                                 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
                                 bool *polled, bool *stop)
{
        bool expand;
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
        if (status < OCRDMA_MAX_CQE_ERR)
                atomic_inc(&dev->cqe_err_stats[status]);

        /* when hw sq is empty, but rq is not empty, we continue
         * to keep the cqe in order to get the cq event again.
         */
        if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
                /* when the cq for rq and sq is the same, it is safe to
                 * return a flush cqe for RQEs.
                 */
                if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
                        *polled = true;
                        status = OCRDMA_CQE_WR_FLUSH_ERR;
                        expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
                } else {
                        /* stop processing further cqe as this cqe is used for
                         * triggering cq event on buddy cq of RQ.
                         * When QP is destroyed, this cqe will be removed
                         * from the cq's hardware q.
                         */
                        *polled = false;
                        *stop = true;
                        expand = false;
                }
        } else if (is_hw_sq_empty(qp)) {
                /* Do nothing */
                expand = false;
                *polled = false;
                *stop = false;
        } else {
                *polled = true;
                expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
        }
        return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
                                     struct ocrdma_cqe *cqe,
                                     struct ib_wc *ibwc, bool *polled)
{
        bool expand = false;
        int tail = qp->sq.tail;
        u32 wqe_idx;

        if (!qp->wqe_wr_id_tbl[tail].signaled) {
                *polled = false;    /* WC cannot be consumed yet */
        } else {
                ibwc->status = IB_WC_SUCCESS;
                ibwc->wc_flags = 0;
                ibwc->qp = &qp->ibqp;
                ocrdma_update_wc(qp, ibwc, tail);
                *polled = true;
        }
        wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
                        OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
        if (tail != wqe_idx)
                expand = true; /* Coalesced CQE can't be consumed yet */

        ocrdma_hwq_inc_tail(&qp->sq);
        return expand;
}

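/* The adapter may coalesce completions: a single CQE whose wqeidx is
 * ahead of sq.tail retires every WQE up to that index. Returning
 * expand == true makes the poll loop revisit the same CQE until the
 * tail catches up, emitting one work completion per signaled WQE and
 * consuming unsignaled ones silently (*polled stays false).
 */
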
static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                             struct ib_wc *ibwc, bool *polled, bool *stop)
{
        int status;
        bool expand;

        status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

        if (status == OCRDMA_CQE_SUCCESS)
                expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
        else
                expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
        return expand;
}

static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
                                 struct ocrdma_cqe *cqe)
{
        int status;
        u16 hdr_type = 0;

        status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
        ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
                                                OCRDMA_CQE_SRCQP_MASK;
        ibwc->pkey_index = 0;
        ibwc->wc_flags = IB_WC_GRH;
        ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
                          OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
                          OCRDMA_CQE_UD_XFER_LEN_MASK;

        /* report the L3 header type on adapters with UDP encap support */
        if (ocrdma_is_udp_encap_supported(dev)) {
                hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
                            OCRDMA_CQE_UD_L3TYPE_SHIFT) &
                            OCRDMA_CQE_UD_L3TYPE_MASK;
                ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
                ibwc->network_hdr_type = hdr_type;
        }

        return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
                                       struct ocrdma_cqe *cqe,
                                       struct ocrdma_qp *qp)
{
        unsigned long flags;
        struct ocrdma_srq *srq;
        u32 wqe_idx;

        srq = get_ocrdma_srq(qp->ibqp.srq);
        wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
                OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
        BUG_ON(wqe_idx < 1);

        ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
        spin_lock_irqsave(&srq->q_lock, flags);
        ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
        spin_unlock_irqrestore(&srq->q_lock, flags);
        ocrdma_hwq_inc_tail(&srq->rq);
}

static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                                 struct ib_wc *ibwc, bool *polled, bool *stop,
                                 int status)
{
        bool expand;
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

        if (status < OCRDMA_MAX_CQE_ERR)
                atomic_inc(&dev->cqe_err_stats[status]);

        /* when hw_rq is empty, but wq is not empty, continue
         * to keep the cqe to get the cq event again.
         */
        if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
                if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
                        *polled = true;
                        status = OCRDMA_CQE_WR_FLUSH_ERR;
                        expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
                } else {
                        *polled = false;
                        *stop = true;
                        expand = false;
                }
        } else if (is_hw_rq_empty(qp)) {
                /* Do nothing */
                expand = false;
                *polled = false;
                *stop = false;
        } else {
                *polled = true;
                expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
        }
        return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
                                     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(qp->ibqp.device);
        ibwc->opcode = IB_WC_RECV;
        ibwc->qp = &qp->ibqp;
        ibwc->status = IB_WC_SUCCESS;

        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
                ocrdma_update_ud_rcqe(dev, ibwc, cqe);
        else
                ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

        if (is_cqe_imm(cqe)) {
                ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
                ibwc->wc_flags |= IB_WC_WITH_IMM;
        } else if (is_cqe_wr_imm(cqe)) {
                ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
                ibwc->wc_flags |= IB_WC_WITH_IMM;
        } else if (is_cqe_invalidated(cqe)) {
                ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
                ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }
        if (qp->ibqp.srq) {
                ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
        } else {
                ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                ocrdma_hwq_inc_tail(&qp->rq);
        }
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                             struct ib_wc *ibwc, bool *polled, bool *stop)
{
        int status;
        bool expand = false;

        ibwc->wc_flags = 0;
        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                        OCRDMA_CQE_UD_STATUS_MASK) >>
                                OCRDMA_CQE_UD_STATUS_SHIFT;
        } else {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                        OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
        }

        if (status == OCRDMA_CQE_SUCCESS) {
                *polled = true;
                ocrdma_poll_success_rcqe(qp, cqe, ibwc);
        } else {
                expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
                                              status);
        }
        return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
                                   u16 cur_getp)
{
        if (cq->phase_change) {
                if (cur_getp == 0)
                        cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
        } else {
                /* clear valid bit */
                cqe->flags_status_srcqpn = 0;
        }
}

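/* CQE ownership is tracked one of two ways: with phase_change set, a
 * phase bit is flipped each time the get pointer wraps back to 0 and
 * is_cqe_valid() compares the entry against cq->phase; otherwise the
 * consumed entry is simply zeroed so it fails the valid check on the
 * next pass around the ring.
 */
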
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
                            struct ib_wc *ibwc)
{
        u16 qpn = 0;
        int i = 0;
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp; bool polled = false; bool stop = false;

        cur_getp = cq->getp;
        while (num_entries) {
                cqe = cq->va + cur_getp;
                /* check whether valid cqe or not */
                if (!is_cqe_valid(cq, cqe))
                        break;
                qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
                /* ignore discarded cqe */
                if (qpn == 0)
                        goto skip_cqe;
                qp = dev->qp_tbl[qpn];
                BUG_ON(qp == NULL);

                if (is_cqe_for_sq(cqe)) {
                        expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                } else {
                        expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                }
                if (expand)
                        goto expand_cqe;
                if (stop)
                        goto stop_cqe;
                /* clear qpn to avoid duplicate processing by discard_cqe() */
                cqe->cmn.qpn = 0;
skip_cqe:
                polled_hw_cqes += 1;
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
                ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
                if (polled) {
                        num_entries -= 1;
                        i += 1;
                        ibwc = ibwc + 1;
                        polled = false;
                }
        }
stop_cqe:
        cq->getp = cur_getp;

        if (polled_hw_cqes)
                ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

        return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* adapter returns single error cqe when qp moves to
                 * error state. So insert error cqes with wc_status as
                 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
                 * respectively which uses this CQ.
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}

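/* Because the adapter reports only a single error CQE when a QP enters
 * the error state, the remaining outstanding WQEs/RQEs are completed
 * above in software with IB_WC_WR_FLUSH_ERR, matching the flush
 * semantics verbs consumers expect for a QP in error.
 */
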
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}

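/* Arming is a CQ doorbell write with the arm (and optionally the
 * solicited-only) bits set and a zero popped count; the adapter then
 * raises a CQ event on the next matching CQE, which ultimately reaches
 * the consumer's comp_handler callback.
 */
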
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
                              enum ib_mr_type mr_type,
                              u32 max_num_sg)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        if (max_num_sg > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
        if (!mr->pages) {
                status = -ENOMEM;
                goto pl_err;
        }

        status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr->pages);
pl_err:
        kfree(mr);
        return ERR_PTR(-ENOMEM);
}

static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        if (unlikely(mr->npages == mr->hwmr.num_pbes))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;

        return 0;
}

int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                     unsigned int *sg_offset)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        mr->npages = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}

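/*
 * Illustrative only (not driver code): the MR path above is exercised
 * through the core helpers; "pd", "sg" and "sg_nents" are placeholders.
 *
 *      struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *      int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *
 * ib_sg_to_pages() calls ocrdma_set_page() once per page-sized block,
 * filling mr->pages[]; the addresses reach the adapter only when a
 * later IB_WR_REG_MR work request is built by ocrdma_build_reg().
 */
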