/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include "i40iw.h"
/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp - iwdev->used_qps;
	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
	props->max_pd = iwdev->max_pd - iwdev->used_pds;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
	return 0;
}
/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	memset(props, 0, sizeof(*props));

	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	props->lid = 1;
	if (netif_carrier_ok(iwdev->netdev))
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	return 0;
}
/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @ibdev: device pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp;
	struct i40iw_ucontext *ucontext;

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return ERR_PTR(-EINVAL);

	if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
		i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
			     req.userspace_ver, I40IW_ABI_USERSPACE_VER);
		return ERR_PTR(-EINVAL);
	}

	memset(&uresp, 0, sizeof(uresp));
	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = I40IW_ABI_KERNEL_VER;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		kfree(ucontext);
		return ERR_PTR(-EFAULT);
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return &ucontext->ibucontext;
}
/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct i40iw_ucontext *ucontext = to_ucontext(context);
	unsigned long flags;

	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->cq_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
	if (!list_empty(&ucontext->qp_reg_mem_list)) {
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

	kfree(ucontext);
	return 0;
}
/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset;
	u64 push_offset;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
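	/*
	 * Offset 0 of the region is the doorbell page and is mapped
	 * non-cached; every other offset is a push page, where the odd
	 * page of each pair is mapped non-cached and the even page
	 * write-combined.
	 */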
	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_private_data = ucontext;
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}
/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}
/**
 * i40iw_alloc_pd - allocate protection domain
 * @ibdev: device pointer from stack
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct i40iw_pd *iwpd;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return ERR_PTR(err);
	}

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);

	if (context) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	}

	i40iw_add_pdusecount(iwpd);
	return &iwpd->ibpd;
error:
	kfree(iwpd);
free_res:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return ERR_PTR(err);
}
/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 */
static int i40iw_dealloc_pd(struct ib_pd *ibpd)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
	return 0;
}
/**
 * i40iw_qp_roundup - return round up qp ring size
 * @wr_ring_size: ring size to round up
 */
static int i40iw_qp_roundup(u32 wr_ring_size)
{
	int scount = 1;

	if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
		wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
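	/*
	 * OR the value with right-shifted copies of itself (shift counts
	 * 1, 2, 4, 8 and 16 cover a 32-bit value) so every bit below the
	 * most significant set bit becomes 1; incrementing then yields
	 * the next power of two.
	 */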
	for (wr_ring_size--; scount <= 16; scount *= 2)
		wr_ring_size |= wr_ring_size >> scount;
	return ++wr_ring_size;
}
/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;
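	/*
	 * A hit also unlinks the entry from the list, so ownership of
	 * the pbl transfers to the caller.
	 */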
	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}
/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}
/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}
/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}
/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}
/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u32 sq_size, rq_size;
	u8 sqshift, rqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
	rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);

	status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	if (!status)
		status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);

	if (status)
		return -ENOMEM;

	sqdepth = sq_size << sqshift;
	rqdepth = rq_size << rqshift;

	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
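	/*
	 * Both rings and the shadow area live in one physically
	 * contiguous DMA allocation: SQ WQEs first, then RQ WQEs,
	 * then the shadow area; the *_pa values below are offsets
	 * into that single buffer.
	 */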
	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sq_size;
	ukinfo->rq_size = rq_size;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}
/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext;
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	void *mem;
	enum i40iw_status_code ret;
	int err_code = 0;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->allocated_buffer = mem;
	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -EINVAL;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);

	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_data\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		if (ibpd->uobject && ibpd->uobject->context) {
			iwqp->user_mode = 1;
			ucontext = to_ucontext(ibpd->uobject->context);

			if (req.user_wqe_buffers) {
				spin_lock_irqsave(
					&ucontext->qp_reg_mem_list_lock, flags);
				iwqp->iwpbl = i40iw_get_pbl(
					(unsigned long)req.user_wqe_buffers,
					&ucontext->qp_reg_mem_list);
				spin_unlock_irqrestore(
					&ucontext->qp_reg_mem_list_lock, flags);

				if (!iwqp->iwpbl) {
					err_code = -ENODATA;
					i40iw_pr_err("no pbl info\n");
					goto error;
				}
			}
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	i40iw_add_devusecount(iwdev);
	if (ibpd->uobject && udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}
	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}
/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	return 0;
}
/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Modify QP fail");
}
/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	u32 err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
			err = -EINVAL;
			goto exit;
		}

		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				i40iw_terminate_del_timer(&iwqp->sc_qp);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;

		if (issue_modify_qp)
			iwqp->iwarp_state = info.next_iwarp_state;
		else
			info.next_iwarp_state = iwqp->iwarp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp)
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		}
	}

	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}
/**
 * i40iw_cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}
/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 */
static int i40iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	if (!ib_cq) {
		i40iw_pr_err("ib_cq == NULL\n");
		return 0;
	}

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	i40iw_cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	kfree(iwcq);
	i40iw_rem_devusecount(iwdev);
	return 0;
}
/**
 * i40iw_create_cq - create cq
 * @ibdev: device pointer from stack
 * @attr: attributes for cq
 * @context: user context created during alloc
 * @udata: user data
 */
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq;
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (entries > iwdev->max_cqe)
		return ERR_PTR(-EINVAL);

	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
	if (!iwcq)
		return ERR_PTR(-ENOMEM);

	memset(&info, 0, sizeof(info));

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		goto error;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	if (attr->comp_vector < iwdev->ceqs_count)
		info.ceq_id = attr->comp_vector;
	info.ceq_id_valid = true;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (context) {
		struct i40iw_ucontext *ucontext;
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		ucontext = to_ucontext(context);
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
			goto cq_free_resources;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (context) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	i40iw_add_devusecount(iwdev);
	return (struct ib_cq *)iwcq;

cq_destroy:
	i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
error:
	kfree(iwcq);
	return ERR_PTR(err_code);
}
/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}
/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
	i40iw_rem_devusecount(iwdev);
}
/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;
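	/*
	 * One random dword seeds all three pieces of the stag: the low
	 * byte becomes the consumer key, the bits outside mr_stagmask
	 * the driver key, and the masked bits pick where the index
	 * search starts.
	 */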
	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
		i40iw_add_devusecount(iwdev);
	}
	return stag;
}
/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
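	/*
	 * Advance within the current physically contiguous pble chunk;
	 * only when its count is exhausted step to the first address of
	 * the next chunk.
	 */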
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}
/**
 * i40iw_copy_user_pgaddrs - copy user page address to pble's os locally
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: ple pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	int chunk_pages, entry, pg_shift, i;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct scatterlist *sg;
	u64 pg_addr = 0;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

	pg_shift = ffs(region->page_size) - 1;
	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
		chunk_pages = sg_dma_len(sg) >> pg_shift;
		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
		    !iwpbl->qp_mr.sq_page)
			iwpbl->qp_mr.sq_page = sg_page(sg);
		for (i = 0; i < chunk_pages; i++) {
			pg_addr = sg_dma_address(sg) + region->page_size * i;
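			/*
			 * The first pble records the masked base address;
			 * later addresses are stored only when aligned to
			 * the MR page size, otherwise they are skipped.
			 */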
			if ((entry + i) == 0)
				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
			else if (!(pg_addr & ~iwmr->page_msk))
				*pbl = cpu_to_le64(pg_addr);
			else
				continue;

			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
		}
	}
}
/**
 * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
 * @addr: virtual address
 * @iwmr: mr pointer for this memory registration
 */
static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
{
	struct vm_area_struct *vma;
	struct hstate *h;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma)) {
		h = hstate_vma(vma);
		if (huge_page_size(h) == 0x200000) {
			iwmr->page_size = huge_page_size(h);
			iwmr->page_msk = huge_page_mask(h);
		}
	}
}
/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}
	return true;
}
/**
 * i40iw_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == I40IW_LEVEL_1) {
		arr = (u64 *)palloc->level1.addr;
		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
		return ret;
	}

	start_addr = (u64 *)leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = (u64 *)leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}
/**
 * i40iw_setup_pbles - copy user pg address to pble's
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag if to use pble's
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
			     bool use_pbles)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	u64 *pbl;
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
		mutex_unlock(&iwdev->pbl_mutex);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
		pbl = (u64 *)pinfo->addr;
	} else {
		pbl = iwmr->pgaddrmem;
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);

	if (use_pbles)
		iwmr->pgaddrmem[0] = *pbl;

	return 0;
}
/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pble
 */
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
			      struct i40iw_mem_reg_req *req,
			      struct i40iw_pbl *iwpbl,
			      bool use_pbles)
{
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_mr *iwmr = iwpbl->iwmr;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	u32 pg_size;
	int err;
	int total;
	bool ret = true;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
	pg_size = iwmr->page_size;

	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;

	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
		return -ENOMEM;
	}

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;

	if (iwmr->type == IW_MEMREG_TYPE_QP) {
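		/*
		 * The page entry just past the sq/rq (or cq) pages holds
		 * the shadow area, hence the arr[total] lookup.
		 */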
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles) {
			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
			if (ret)
				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
		}

		if (!ret) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[req->sq_pages];
		}
	} else {		/* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles)
			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

		if (!ret)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}

	if (use_pbles && ret) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
	}

	return err;
}
/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 */
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
	struct i40iw_allocate_stag_info *info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.alloc_stag.info;
	memset(info, 0, sizeof(*info));
	info->page_size = PAGE_SIZE;
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}
/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
 */
static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
				    enum ib_mr_type mr_type,
				    u32 max_num_sg)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		err_code = -EOVERFLOW;
		goto err;
	}
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	mutex_lock(&iwdev->pbl_mutex);
	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
	mutex_unlock(&iwdev->pbl_mutex);
	if (status)
		goto err1;

	if (palloc->level != I40IW_LEVEL_1)
		goto err2;
	err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err2;
	iwpbl->pbl_allocated = true;
	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err2:
	i40iw_free_pble(iwdev->pble_rsrc, palloc);
err1:
	i40iw_free_stag(iwdev, stag);
err:
	kfree(iwmr);
	return ERR_PTR(err_code);
}
/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	pbl = (u64 *)palloc->level1.addr;
	pbl[iwmr->npages++] = cpu_to_le64(addr);
	return 0;
}
/**
 * i40iw_map_mr_sg - map of sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg pages
 * @sg_offset: offset into the sg list
 */
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
}
/**
 * i40iw_drain_sq - drain the send queue
 * @ibqp: ib qp pointer
 */
static void i40iw_drain_sq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
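	/* Block only while the SQ ring still reports unfinished work. */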
	if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
		wait_for_completion(&iwqp->sq_drained);
}
/**
 * i40iw_drain_rq - drain the receive queue
 * @ibqp: ib qp pointer
 */
static void i40iw_drain_rq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
		wait_for_completion(&iwqp->rq_drained);
}
/**
 * i40iw_hwreg_mr - send cqp command for memory registration
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 * @access: access for MR
 */
static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
			  struct i40iw_mr *iwmr,
			  u16 access)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_reg_ns_stag_info *stag_info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
	memset(stag_info, 0, sizeof(*stag_info));
	stag_info->va = (void *)(unsigned long)iwpbl->user_base;
	stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	stag_info->stag_key = (u8)iwmr->stag;
	stag_info->total_len = iwmr->length;
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
	stag_info->page_size = iwmr->page_size;

	if (iwpbl->pbl_allocated) {
		if (palloc->level == I40IW_LEVEL_1) {
			stag_info->first_pm_pbl_index = palloc->level1.idx;
			stag_info->chunk_size = 1;
		} else {
			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
			stag_info->chunk_size = 3;
		}
	} else {
		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
	}

	cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
	cqp_info->post_sq = 1;
	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}
/**
 * i40iw_reg_user_mr - Register a user memory region
 * @pd: ptr of pd
 * @start: virtual start address
 * @length: length of mr
 * @virt: virtual address
 * @acc: access of mr
 * @udata: user data
 */
static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
				       u64 start,
				       u64 length,
				       u64 virt,
				       int acc,
				       struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_ucontext *ucontext;
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	struct ib_umem *region;
	struct i40iw_mem_reg_req req;
	u64 pbl_depth = 0;
	u32 stag = 0;
	u16 access;
	u64 region_length;
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;
	int ret;
	int pg_shift;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);
	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(region))
		return (struct ib_mr *)region;

	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);
		return ERR_PTR(-EFAULT);
	}

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr) {
		ib_umem_release(region);
		return ERR_PTR(-ENOMEM);
	}

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	ucontext = to_ucontext(pd->uobject->context);

	iwmr->page_size = region->page_size;
	iwmr->page_msk = PAGE_MASK;

	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
		i40iw_set_hugetlb_values(start, iwmr);

	region_length = region->length + (start & (iwmr->page_size - 1));
	pg_shift = ffs(iwmr->page_size) - 1;
	pbl_depth = region_length >> pg_shift;
	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;
	palloc = &iwpbl->pble_alloc;

	iwmr->type = req.reg_type;
	iwmr->page_cnt = (u32)pbl_depth;

	switch (req.reg_type) {
	case IW_MEMREG_TYPE_QP:
		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_CQ:
		use_pbles = (req.cq_pages > 1);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		use_pbles = (iwmr->page_cnt != 1);
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		if (use_pbles) {
			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
			if (ret) {
				i40iw_free_pble(iwdev->pble_rsrc, palloc);
				iwpbl->pbl_allocated = false;
			}
		}

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {
			err = -ENOMEM;
			goto error;
		}

		iwmr->stag = stag;
		iwmr->ibmr.rkey = stag;
		iwmr->ibmr.lkey = stag;

		err = i40iw_hwreg_mr(iwdev, iwmr, access);
		if (err) {
			i40iw_free_stag(iwdev, stag);
			goto error;
		}
		break;
	default:
		goto error;
	}

	iwmr->type = req.reg_type;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;

error:
	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	ib_umem_release(region);
	kfree(iwmr);
	return ERR_PTR(err);
}
/**
 * i40iw_reg_phys_mr - register kernel physical memory
 * @pd: ibpd pointer
 * @addr: physical address of memory to register
 * @size: size of memory to register
 * @acc: Access rights
 * @iova_start: start of virtual address for physical buffers
 */
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
				u64 addr,
				u64 size,
				int acc,
				u64 *iova_start)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
	int ret;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	iwpbl->user_base = *iova_start;
	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		ret = -EOVERFLOW;
		goto err;
	}
	access |= i40iw_get_user_access(acc);
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->page_cnt = 1;
	iwmr->pgaddrmem[0] = addr;
	iwmr->length = size;
	status = i40iw_hwreg_mr(iwdev, iwmr, access);
	if (status) {
		i40iw_free_stag(iwdev, stag);
		ret = -ENOMEM;
		goto err;
	}

	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
 err:
	kfree(iwmr);
	return ERR_PTR(ret);
}
/**
 * i40iw_get_dma_mr - register physical mem
 * @pd: ptr of pd
 * @acc: access for memory
 */
static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	u64 kva = 0;

	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}
/**
 * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
 * @iwmr: iwmr for IB's user page addresses
 * @ucontext: ptr to user context
 */
static void i40iw_del_memlist(struct i40iw_mr *iwmr,
			      struct i40iw_ucontext *ucontext)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	unsigned long flags;

	switch (iwmr->type) {
	case IW_MEMREG_TYPE_CQ:
		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->cq_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_QP:
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		if (!list_empty(&ucontext->qp_reg_mem_list))
			list_del(&iwpbl->list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	default:
		break;
	}
}
/**
 * i40iw_dereg_mr - deregister mr
 * @ib_mr: mr ptr for dereg
 */
static int i40iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct ib_pd *ibpd = ib_mr->pd;
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_mr *iwmr = to_iwmr(ib_mr);
	struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
	enum i40iw_status_code status;
	struct i40iw_dealloc_stag_info *info;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	u32 stag_idx;

	if (iwmr->region)
		ib_umem_release(iwmr->region);

	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
		if (ibpd->uobject) {
			struct i40iw_ucontext *ucontext;

			ucontext = to_ucontext(ibpd->uobject->context);
			i40iw_del_memlist(iwmr, ucontext);
		}
		if (iwpbl->pbl_allocated)
			i40iw_free_pble(iwdev->pble_rsrc, palloc);
		kfree(iwmr);
		return 0;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.dealloc_stag.info;
	memset(info, 0, sizeof(*info));

	info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
	info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
	stag_idx = info->stag_idx;
	info->mr = true;
	if (iwpbl->pbl_allocated)
		info->dealloc_pbl = true;

	cqp_info->cqp_cmd = OP_DEALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
	i40iw_rem_pdusecount(iwpd, iwdev);
	i40iw_free_stag(iwdev, iwmr->stag);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	kfree(iwmr);
	return 0;
}
static ssize_t i40iw_show_rev(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct i40iw_ib_device *iwibdev = container_of(dev,
						       struct i40iw_ib_device,
						       ibdev.dev);
	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;

	return sprintf(buf, "%x\n", hw_rev);
}

static ssize_t i40iw_show_hca(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "I40IW\n");
}

static ssize_t i40iw_show_board(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);

static struct device_attribute *i40iw_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: copied into sg_list
 * @sgl: copy from sgl
 * @num_sges: count of sg entries
 */
static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
{
	unsigned int i;

	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
		sg_list[i].tag_off = sgl[i].addr;
		sg_list[i].len = sgl[i].length;
		sg_list[i].stag = sgl[i].lkey;
	}
}
/**
 * i40iw_post_send - kernel application wr
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 */
static int i40iw_post_send(struct ib_qp *ibqp,
			   struct ib_send_wr *ib_wr,
			   struct ib_send_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_sq_info info;
	enum i40iw_status_code ret;
	int err = 0;
	unsigned long flags;
	bool inv_stag;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		inv_stag = false;
		memset(&info, 0, sizeof(info));
		info.wr_id = (u64)(ib_wr->wr_id);
		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
			info.signaled = true;
		if (ib_wr->send_flags & IB_SEND_FENCE)
			info.read_fence = true;

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			/* fall-through */
		case IB_WR_SEND_WITH_INV:
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL;
				else
					info.op_type = I40IW_OP_TYPE_SEND;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
				else
					info.op_type = I40IW_OP_TYPE_SEND_INV;
			}

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_send.len = ib_wr->sg_list[0].length;
				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			} else {
				info.op.send.num_sges = ib_wr->num_sge;
				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_WRITE:
			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
			} else {
				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			inv_stag = true;
			/* fall-through */
		case IB_WR_RDMA_READ:
			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
				err = -EINVAL;
				break;
			}
			info.op_type = I40IW_OP_TYPE_RDMA_READ;
			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
			info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_LOCAL_INV:
			info.op_type = I40IW_OP_TYPE_INV_STAG;
			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
			if (ret)
				err = -EINVAL;
			break;
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
2302 info
.stag_key
= reg_wr(ib_wr
)->key
& 0xff;
2303 info
.stag_idx
= reg_wr(ib_wr
)->key
>> 8;
2304 info
.page_size
= reg_wr(ib_wr
)->mr
->page_size
;
2305 info
.wr_id
= ib_wr
->wr_id
;
2307 info
.addr_type
= I40IW_ADDR_TYPE_VA_BASED
;
2308 info
.va
= (void *)(uintptr_t)iwmr
->ibmr
.iova
;
2309 info
.total_len
= iwmr
->ibmr
.length
;
2310 info
.reg_addr_pa
= *(u64
*)palloc
->level1
.addr
;
2311 info
.first_pm_pbl_index
= palloc
->level1
.idx
;
2312 info
.local_fence
= ib_wr
->send_flags
& IB_SEND_FENCE
;
2313 info
.signaled
= ib_wr
->send_flags
& IB_SEND_SIGNALED
;
2315 if (iwmr
->npages
> I40IW_MIN_PAGES_PER_FMR
)
2316 info
.chunk_size
= 1;
2318 ret
= dev
->iw_priv_qp_ops
->iw_mr_fast_register(&iwqp
->sc_qp
, &info
, true);
2325 i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
2332 ib_wr
= ib_wr
->next
;
2338 ukqp
->ops
.iw_qp_post_wr(ukqp
);
2339 spin_unlock_irqrestore(&iwqp
->lock
, flags
);
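
/*
 * Usage sketch (hypothetical kernel consumer, not part of this driver):
 * a two-fragment signaled send posted through the verbs layer lands in
 * i40iw_post_send() above. All names below other than the verbs API
 * itself are illustrative.
 *
 *	struct ib_sge sge[2] = {
 *		{ .addr = dma_addr0, .length = len0, .lkey = mr->lkey },
 *		{ .addr = dma_addr1, .length = len1, .lkey = mr->lkey },
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = my_cookie,
 *		.sg_list = sge,
 *		.num_sge = 2,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 */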
/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int i40iw_post_recv(struct ib_qp *ibqp,
			   struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);
	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err(" post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}
 out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
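
/*
 * On failure the loop above stops at the offending WR and returns it
 * through *bad_wr, so a caller can inspect or repost the untried
 * remainder of the chain; WRs already accepted stay posted.
 */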
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
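
/*
 * Usage sketch (hypothetical consumer): draining completions in a
 * budget-limited loop, which ends up in i40iw_poll_cq() above. wc[],
 * the budget of 16 and handle_completion() are illustrative.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	n = ib_poll_cq(cq, 16, wc);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&wc[i]);
 */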
/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
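
/*
 * Consumers typically re-poll after arming the CQ here, since a
 * completion that arrived between the last poll and the arm would
 * otherwise be consumed without generating an event:
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);		// hypothetical handler
 */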
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = i40iw_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}
static const char * const i40iw_hw_stat_names[] = {
	/* 32-bit counters */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64-bit counters */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
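
/*
 * These counter names are exported verbatim through the rdma_hw_stats
 * interface, so each one typically appears as a file under the device's
 * hw_counters sysfs directory, e.g.
 * /sys/class/infiniband/i40iw0/ports/1/hw_counters/tcpRetransSegs
 * (path illustrative; layout is determined by the RDMA core).
 */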
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
				 size_t str_len)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, str_len, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
		I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;

	if (dev->is_pf) {
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
/**
 * i40iw_modify_port - Modify port properties
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */
static int i40iw_modify_port(struct ib_device *ibdev,
			     u8 port,
			     int port_modify_mask,
			     struct ib_port_modify *props)
{
	return -ENOSYS;
}
/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
/**
 * i40iw_create_ah - create address handle
 * @ibpd: ptr of pd
 * @attr: address handle attributes
 * @udata: user data
 */
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
				     struct ib_ah_attr *attr,
				     struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}
/**
 * i40iw_destroy_ah - Destroy address handle
 * @ah: pointer to address handle
 */
static int i40iw_destroy_ah(struct ib_ah *ah)
{
	return -ENOSYS;
}
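
/*
 * Address handles are only meaningful for UD-style transports; iWARP is
 * connection-oriented, so both AH verbs above simply report -ENOSYS.
 */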
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
	if (!iwibdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return NULL;
	}
	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
	iwibdev->ibdev.owner = THIS_MODULE;
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dma_device = &pcidev->dev;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;
	iwibdev->ibdev.modify_port = i40iw_modify_port;
	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
	iwibdev->ibdev.query_gid = i40iw_query_gid;
	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
	iwibdev->ibdev.mmap = i40iw_mmap;
	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
	iwibdev->ibdev.create_qp = i40iw_create_qp;
	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
	iwibdev->ibdev.query_qp = i40iw_query_qp;
	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
	iwibdev->ibdev.create_cq = i40iw_create_cq;
	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
	iwibdev->ibdev.query_device = i40iw_query_device;
	iwibdev->ibdev.create_ah = i40iw_create_ah;
	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		return NULL;
	}

	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
	iwibdev->ibdev.iwcm->connect = i40iw_connect;
	iwibdev->ibdev.iwcm->accept = i40iw_accept;
	iwibdev->ibdev.iwcm->reject = i40iw_reject;
	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iwcm->ifname));
	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
	iwibdev->ibdev.post_send = i40iw_post_send;
	iwibdev->ibdev.post_recv = i40iw_post_recv;

	return iwibdev;
}
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
/**
 * i40iw_unregister_rdma_device - unregister of iwarp from IB
 * @iwibdev: rdma device ptr
 */
static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
		device_remove_file(&iwibdev->ibdev.dev,
				   i40iw_dev_attributes[i]);
	ib_unregister_device(&iwibdev->ibdev);
}
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	if (!iwibdev)
		return;

	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int i, ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;

	ret = ib_register_device(&iwibdev->ibdev, NULL);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
		ret =
		    device_create_file(&iwibdev->ibdev.dev,
				       i40iw_dev_attributes[i]);
		if (ret) {
			while (i > 0) {
				i--;
				device_remove_file(&iwibdev->ibdev.dev,
						   i40iw_dev_attributes[i]);
			}
			ib_unregister_device(&iwibdev->ibdev);
			goto error;
		}
	}
	return 0;
error:
	kfree(iwdev->iwibdev->ibdev.iwcm);
	iwdev->iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}