/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/device.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/delay.h>
38 #include <linux/errno.h>
39 #include <linux/list.h>
40 #include <linux/sched.h>
41 #include <linux/spinlock.h>
42 #include <linux/ethtool.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/inetdevice.h>
45 #include <linux/slab.h>
49 #include <asm/byteorder.h>
51 #include <rdma/iw_cm.h>
52 #include <rdma/ib_verbs.h>
53 #include <rdma/ib_smi.h>
54 #include <rdma/ib_umem.h>
55 #include <rdma/ib_user_verbs.h>
59 #include "iwch_provider.h"
61 #include <rdma/cxgb3-abi.h>
64 static struct ib_ah
*iwch_ah_create(struct ib_pd
*pd
,
65 struct ib_ah_attr
*ah_attr
,
66 struct ib_udata
*udata
)
68 return ERR_PTR(-ENOSYS
);
71 static int iwch_ah_destroy(struct ib_ah
*ah
)
76 static int iwch_multicast_attach(struct ib_qp
*ibqp
, union ib_gid
*gid
, u16 lid
)
81 static int iwch_multicast_detach(struct ib_qp
*ibqp
, union ib_gid
*gid
, u16 lid
)
86 static int iwch_process_mad(struct ib_device
*ibdev
,
89 const struct ib_wc
*in_wc
,
90 const struct ib_grh
*in_grh
,
91 const struct ib_mad_hdr
*in_mad
,
93 struct ib_mad_hdr
*out_mad
,
95 u16
*out_mad_pkey_index
)
100 static int iwch_dealloc_ucontext(struct ib_ucontext
*context
)
102 struct iwch_dev
*rhp
= to_iwch_dev(context
->device
);
103 struct iwch_ucontext
*ucontext
= to_iwch_ucontext(context
);
104 struct iwch_mm_entry
*mm
, *tmp
;
106 PDBG("%s context %p\n", __func__
, context
);
107 list_for_each_entry_safe(mm
, tmp
, &ucontext
->mmaps
, entry
)
109 cxio_release_ucontext(&rhp
->rdev
, &ucontext
->uctx
);
114 static struct ib_ucontext
*iwch_alloc_ucontext(struct ib_device
*ibdev
,
115 struct ib_udata
*udata
)
117 struct iwch_ucontext
*context
;
118 struct iwch_dev
*rhp
= to_iwch_dev(ibdev
);
120 PDBG("%s ibdev %p\n", __func__
, ibdev
);
121 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
123 return ERR_PTR(-ENOMEM
);
124 cxio_init_ucontext(&rhp
->rdev
, &context
->uctx
);
125 INIT_LIST_HEAD(&context
->mmaps
);
126 spin_lock_init(&context
->mmap_lock
);
127 return &context
->ibucontext
;
130 static int iwch_destroy_cq(struct ib_cq
*ib_cq
)
134 PDBG("%s ib_cq %p\n", __func__
, ib_cq
);
135 chp
= to_iwch_cq(ib_cq
);
137 remove_handle(chp
->rhp
, &chp
->rhp
->cqidr
, chp
->cq
.cqid
);
138 atomic_dec(&chp
->refcnt
);
139 wait_event(chp
->wait
, !atomic_read(&chp
->refcnt
));
141 cxio_destroy_cq(&chp
->rhp
->rdev
, &chp
->cq
);
146 static struct ib_cq
*iwch_create_cq(struct ib_device
*ibdev
,
147 const struct ib_cq_init_attr
*attr
,
148 struct ib_ucontext
*ib_context
,
149 struct ib_udata
*udata
)
151 int entries
= attr
->cqe
;
152 struct iwch_dev
*rhp
;
154 struct iwch_create_cq_resp uresp
;
155 struct iwch_create_cq_req ureq
;
156 struct iwch_ucontext
*ucontext
= NULL
;
160 PDBG("%s ib_dev %p entries %d\n", __func__
, ibdev
, entries
);
162 return ERR_PTR(-EINVAL
);
164 rhp
= to_iwch_dev(ibdev
);
165 chp
= kzalloc(sizeof(*chp
), GFP_KERNEL
);
167 return ERR_PTR(-ENOMEM
);
170 ucontext
= to_iwch_ucontext(ib_context
);
171 if (!t3a_device(rhp
)) {
172 if (ib_copy_from_udata(&ureq
, udata
, sizeof (ureq
))) {
174 return ERR_PTR(-EFAULT
);
176 chp
->user_rptr_addr
= (u32 __user
*)(unsigned long)ureq
.user_rptr_addr
;
180 if (t3a_device(rhp
)) {
183 * T3A: Add some fluff to handle extra CQEs inserted
184 * for various errors.
185 * Additional CQE possibilities:
187 * incoming RDMA WRITE Failures
188 * incoming RDMA READ REQUEST FAILUREs
189 * NOTE: We cannot ensure the CQ won't overflow.
193 entries
= roundup_pow_of_two(entries
);
194 chp
->cq
.size_log2
= ilog2(entries
);
196 if (cxio_create_cq(&rhp
->rdev
, &chp
->cq
, !ucontext
)) {
198 return ERR_PTR(-ENOMEM
);
201 chp
->ibcq
.cqe
= 1 << chp
->cq
.size_log2
;
202 spin_lock_init(&chp
->lock
);
203 spin_lock_init(&chp
->comp_handler_lock
);
204 atomic_set(&chp
->refcnt
, 1);
205 init_waitqueue_head(&chp
->wait
);
206 if (insert_handle(rhp
, &rhp
->cqidr
, chp
, chp
->cq
.cqid
)) {
207 cxio_destroy_cq(&chp
->rhp
->rdev
, &chp
->cq
);
209 return ERR_PTR(-ENOMEM
);
213 struct iwch_mm_entry
*mm
;
215 mm
= kmalloc(sizeof *mm
, GFP_KERNEL
);
217 iwch_destroy_cq(&chp
->ibcq
);
218 return ERR_PTR(-ENOMEM
);
220 uresp
.cqid
= chp
->cq
.cqid
;
221 uresp
.size_log2
= chp
->cq
.size_log2
;
222 spin_lock(&ucontext
->mmap_lock
);
223 uresp
.key
= ucontext
->key
;
224 ucontext
->key
+= PAGE_SIZE
;
225 spin_unlock(&ucontext
->mmap_lock
);
227 mm
->addr
= virt_to_phys(chp
->cq
.queue
);
228 if (udata
->outlen
< sizeof uresp
) {
230 printk(KERN_WARNING MOD
"Warning - "
231 "downlevel libcxgb3 (non-fatal).\n");
232 mm
->len
= PAGE_ALIGN((1UL << uresp
.size_log2
) *
233 sizeof(struct t3_cqe
));
234 resplen
= sizeof(struct iwch_create_cq_resp_v0
);
236 mm
->len
= PAGE_ALIGN(((1UL << uresp
.size_log2
) + 1) *
237 sizeof(struct t3_cqe
));
238 uresp
.memsize
= mm
->len
;
240 resplen
= sizeof uresp
;
242 if (ib_copy_to_udata(udata
, &uresp
, resplen
)) {
244 iwch_destroy_cq(&chp
->ibcq
);
245 return ERR_PTR(-EFAULT
);
247 insert_mmap(ucontext
, mm
);
249 PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
250 chp
->cq
.cqid
, chp
, (1 << chp
->cq
.size_log2
),
251 (unsigned long long) chp
->cq
.dma_addr
);
255 static int iwch_resize_cq(struct ib_cq
*cq
, int cqe
, struct ib_udata
*udata
)
258 struct iwch_cq
*chp
= to_iwch_cq(cq
);
259 struct t3_cq oldcq
, newcq
;
262 PDBG("%s ib_cq %p cqe %d\n", __func__
, cq
, cqe
);
264 /* We don't downsize... */
268 /* create new t3_cq with new size */
269 cqe
= roundup_pow_of_two(cqe
+1);
270 newcq
.size_log2
= ilog2(cqe
);
272 /* Dont allow resize to less than the current wce count */
273 if (cqe
< Q_COUNT(chp
->cq
.rptr
, chp
->cq
.wptr
)) {
277 /* Quiesce all QPs using this CQ */
278 ret
= iwch_quiesce_qps(chp
);
283 ret
= cxio_create_cq(&chp
->rhp
->rdev
, &newcq
);
289 memcpy(newcq
.queue
, chp
->cq
.queue
, (1 << chp
->cq
.size_log2
) *
290 sizeof(struct t3_cqe
));
292 /* old iwch_qp gets new t3_cq but keeps old cqid */
295 chp
->cq
.cqid
= oldcq
.cqid
;
297 /* resize new t3_cq to update the HW context */
298 ret
= cxio_resize_cq(&chp
->rhp
->rdev
, &chp
->cq
);
303 chp
->ibcq
.cqe
= (1<<chp
->cq
.size_log2
) - 1;
305 /* destroy old t3_cq */
306 oldcq
.cqid
= newcq
.cqid
;
307 ret
= cxio_destroy_cq(&chp
->rhp
->rdev
, &oldcq
);
309 printk(KERN_ERR MOD
"%s - cxio_destroy_cq failed %d\n",
313 /* add user hooks here */
316 ret
= iwch_resume_qps(chp
);
323 static int iwch_arm_cq(struct ib_cq
*ibcq
, enum ib_cq_notify_flags flags
)
325 struct iwch_dev
*rhp
;
327 enum t3_cq_opcode cq_op
;
332 chp
= to_iwch_cq(ibcq
);
334 if ((flags
& IB_CQ_SOLICITED_MASK
) == IB_CQ_SOLICITED
)
338 if (chp
->user_rptr_addr
) {
339 if (get_user(rptr
, chp
->user_rptr_addr
))
341 spin_lock_irqsave(&chp
->lock
, flag
);
344 spin_lock_irqsave(&chp
->lock
, flag
);
345 PDBG("%s rptr 0x%x\n", __func__
, chp
->cq
.rptr
);
346 err
= cxio_hal_cq_op(&rhp
->rdev
, &chp
->cq
, cq_op
, 0);
347 spin_unlock_irqrestore(&chp
->lock
, flag
);
349 printk(KERN_ERR MOD
"Error %d rearming CQID 0x%x\n", err
,
351 if (err
> 0 && !(flags
& IB_CQ_REPORT_MISSED_EVENTS
))
356 static int iwch_mmap(struct ib_ucontext
*context
, struct vm_area_struct
*vma
)
358 int len
= vma
->vm_end
- vma
->vm_start
;
359 u32 key
= vma
->vm_pgoff
<< PAGE_SHIFT
;
360 struct cxio_rdev
*rdev_p
;
362 struct iwch_mm_entry
*mm
;
363 struct iwch_ucontext
*ucontext
;
366 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__
, vma
->vm_pgoff
,
369 if (vma
->vm_start
& (PAGE_SIZE
-1)) {
373 rdev_p
= &(to_iwch_dev(context
->device
)->rdev
);
374 ucontext
= to_iwch_ucontext(context
);
376 mm
= remove_mmap(ucontext
, key
, len
);
382 if ((addr
>= rdev_p
->rnic_info
.udbell_physbase
) &&
383 (addr
< (rdev_p
->rnic_info
.udbell_physbase
+
384 rdev_p
->rnic_info
.udbell_len
))) {
387 * Map T3 DB register.
389 if (vma
->vm_flags
& VM_READ
) {
393 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
394 vma
->vm_flags
|= VM_DONTCOPY
| VM_DONTEXPAND
;
395 vma
->vm_flags
&= ~VM_MAYREAD
;
396 ret
= io_remap_pfn_range(vma
, vma
->vm_start
,
398 len
, vma
->vm_page_prot
);
402 * Map WQ or CQ contig dma memory...
404 ret
= remap_pfn_range(vma
, vma
->vm_start
,
406 len
, vma
->vm_page_prot
);
412 static int iwch_deallocate_pd(struct ib_pd
*pd
)
414 struct iwch_dev
*rhp
;
417 php
= to_iwch_pd(pd
);
419 PDBG("%s ibpd %p pdid 0x%x\n", __func__
, pd
, php
->pdid
);
420 cxio_hal_put_pdid(rhp
->rdev
.rscp
, php
->pdid
);
425 static struct ib_pd
*iwch_allocate_pd(struct ib_device
*ibdev
,
426 struct ib_ucontext
*context
,
427 struct ib_udata
*udata
)
431 struct iwch_dev
*rhp
;
433 PDBG("%s ibdev %p\n", __func__
, ibdev
);
434 rhp
= (struct iwch_dev
*) ibdev
;
435 pdid
= cxio_hal_get_pdid(rhp
->rdev
.rscp
);
437 return ERR_PTR(-EINVAL
);
438 php
= kzalloc(sizeof(*php
), GFP_KERNEL
);
440 cxio_hal_put_pdid(rhp
->rdev
.rscp
, pdid
);
441 return ERR_PTR(-ENOMEM
);
446 if (ib_copy_to_udata(udata
, &php
->pdid
, sizeof (__u32
))) {
447 iwch_deallocate_pd(&php
->ibpd
);
448 return ERR_PTR(-EFAULT
);
451 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__
, pdid
, php
);
455 static int iwch_dereg_mr(struct ib_mr
*ib_mr
)
457 struct iwch_dev
*rhp
;
461 PDBG("%s ib_mr %p\n", __func__
, ib_mr
);
463 mhp
= to_iwch_mr(ib_mr
);
466 mmid
= mhp
->attr
.stag
>> 8;
467 cxio_dereg_mem(&rhp
->rdev
, mhp
->attr
.stag
, mhp
->attr
.pbl_size
,
470 remove_handle(rhp
, &rhp
->mmidr
, mmid
);
472 kfree((void *) (unsigned long) mhp
->kva
);
474 ib_umem_release(mhp
->umem
);
475 PDBG("%s mmid 0x%x ptr %p\n", __func__
, mmid
, mhp
);
480 static struct ib_mr
*iwch_get_dma_mr(struct ib_pd
*pd
, int acc
)
482 const u64 total_size
= 0xffffffff;
483 const u64 mask
= (total_size
+ PAGE_SIZE
- 1) & PAGE_MASK
;
484 struct iwch_pd
*php
= to_iwch_pd(pd
);
485 struct iwch_dev
*rhp
= php
->rhp
;
488 int shift
= 26, npages
, ret
, i
;
490 PDBG("%s ib_pd %p\n", __func__
, pd
);
493 * T3 only supports 32 bits of size.
495 if (sizeof(phys_addr_t
) > 4) {
496 pr_warn_once(MOD
"Cannot support dma_mrs on this platform.\n");
497 return ERR_PTR(-ENOTSUPP
);
500 mhp
= kzalloc(sizeof(*mhp
), GFP_KERNEL
);
502 return ERR_PTR(-ENOMEM
);
506 npages
= (total_size
+ (1ULL << shift
) - 1) >> shift
;
512 page_list
= kmalloc_array(npages
, sizeof(u64
), GFP_KERNEL
);
518 for (i
= 0; i
< npages
; i
++)
519 page_list
[i
] = cpu_to_be64((u64
)i
<< shift
);
521 PDBG("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
522 __func__
, mask
, shift
, total_size
, npages
);
524 ret
= iwch_alloc_pbl(mhp
, npages
);
530 ret
= iwch_write_pbl(mhp
, page_list
, npages
, 0);
535 mhp
->attr
.pdid
= php
->pdid
;
538 mhp
->attr
.perms
= iwch_ib_to_tpt_access(acc
);
539 mhp
->attr
.va_fbo
= 0;
540 mhp
->attr
.page_size
= shift
- 12;
542 mhp
->attr
.len
= (u32
) total_size
;
543 mhp
->attr
.pbl_size
= npages
;
544 ret
= iwch_register_mem(rhp
, php
, mhp
, shift
);
558 static struct ib_mr
*iwch_reg_user_mr(struct ib_pd
*pd
, u64 start
, u64 length
,
559 u64 virt
, int acc
, struct ib_udata
*udata
)
565 struct iwch_dev
*rhp
;
568 struct iwch_reg_user_mr_resp uresp
;
569 struct scatterlist
*sg
;
570 PDBG("%s ib_pd %p\n", __func__
, pd
);
572 php
= to_iwch_pd(pd
);
574 mhp
= kzalloc(sizeof(*mhp
), GFP_KERNEL
);
576 return ERR_PTR(-ENOMEM
);
580 mhp
->umem
= ib_umem_get(pd
->uobject
->context
, start
, length
, acc
, 0);
581 if (IS_ERR(mhp
->umem
)) {
582 err
= PTR_ERR(mhp
->umem
);
587 shift
= ffs(mhp
->umem
->page_size
) - 1;
591 err
= iwch_alloc_pbl(mhp
, n
);
595 pages
= (__be64
*) __get_free_page(GFP_KERNEL
);
603 for_each_sg(mhp
->umem
->sg_head
.sgl
, sg
, mhp
->umem
->nmap
, entry
) {
604 len
= sg_dma_len(sg
) >> shift
;
605 for (k
= 0; k
< len
; ++k
) {
606 pages
[i
++] = cpu_to_be64(sg_dma_address(sg
) +
607 mhp
->umem
->page_size
* k
);
608 if (i
== PAGE_SIZE
/ sizeof *pages
) {
609 err
= iwch_write_pbl(mhp
, pages
, i
, n
);
619 err
= iwch_write_pbl(mhp
, pages
, i
, n
);
622 free_page((unsigned long) pages
);
626 mhp
->attr
.pdid
= php
->pdid
;
628 mhp
->attr
.perms
= iwch_ib_to_tpt_access(acc
);
629 mhp
->attr
.va_fbo
= virt
;
630 mhp
->attr
.page_size
= shift
- 12;
631 mhp
->attr
.len
= (u32
) length
;
633 err
= iwch_register_mem(rhp
, php
, mhp
, shift
);
637 if (udata
&& !t3a_device(rhp
)) {
638 uresp
.pbl_addr
= (mhp
->attr
.pbl_addr
-
639 rhp
->rdev
.rnic_info
.pbl_base
) >> 3;
640 PDBG("%s user resp pbl_addr 0x%x\n", __func__
,
643 if (ib_copy_to_udata(udata
, &uresp
, sizeof (uresp
))) {
644 iwch_dereg_mr(&mhp
->ibmr
);
656 ib_umem_release(mhp
->umem
);
661 static struct ib_mw
*iwch_alloc_mw(struct ib_pd
*pd
, enum ib_mw_type type
,
662 struct ib_udata
*udata
)
664 struct iwch_dev
*rhp
;
671 if (type
!= IB_MW_TYPE_1
)
672 return ERR_PTR(-EINVAL
);
674 php
= to_iwch_pd(pd
);
676 mhp
= kzalloc(sizeof(*mhp
), GFP_KERNEL
);
678 return ERR_PTR(-ENOMEM
);
679 ret
= cxio_allocate_window(&rhp
->rdev
, &stag
, php
->pdid
);
685 mhp
->attr
.pdid
= php
->pdid
;
686 mhp
->attr
.type
= TPT_MW
;
687 mhp
->attr
.stag
= stag
;
689 mhp
->ibmw
.rkey
= stag
;
690 if (insert_handle(rhp
, &rhp
->mmidr
, mhp
, mmid
)) {
691 cxio_deallocate_window(&rhp
->rdev
, mhp
->attr
.stag
);
693 return ERR_PTR(-ENOMEM
);
695 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__
, mmid
, mhp
, stag
);
699 static int iwch_dealloc_mw(struct ib_mw
*mw
)
701 struct iwch_dev
*rhp
;
705 mhp
= to_iwch_mw(mw
);
707 mmid
= (mw
->rkey
) >> 8;
708 cxio_deallocate_window(&rhp
->rdev
, mhp
->attr
.stag
);
709 remove_handle(rhp
, &rhp
->mmidr
, mmid
);
710 PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__
, mw
, mmid
, mhp
);
715 static struct ib_mr
*iwch_alloc_mr(struct ib_pd
*pd
,
716 enum ib_mr_type mr_type
,
719 struct iwch_dev
*rhp
;
726 if (mr_type
!= IB_MR_TYPE_MEM_REG
||
727 max_num_sg
> T3_MAX_FASTREG_DEPTH
)
728 return ERR_PTR(-EINVAL
);
730 php
= to_iwch_pd(pd
);
732 mhp
= kzalloc(sizeof(*mhp
), GFP_KERNEL
);
736 mhp
->pages
= kcalloc(max_num_sg
, sizeof(u64
), GFP_KERNEL
);
743 ret
= iwch_alloc_pbl(mhp
, max_num_sg
);
746 mhp
->attr
.pbl_size
= max_num_sg
;
747 ret
= cxio_allocate_stag(&rhp
->rdev
, &stag
, php
->pdid
,
748 mhp
->attr
.pbl_size
, mhp
->attr
.pbl_addr
);
751 mhp
->attr
.pdid
= php
->pdid
;
752 mhp
->attr
.type
= TPT_NON_SHARED_MR
;
753 mhp
->attr
.stag
= stag
;
756 mhp
->ibmr
.rkey
= mhp
->ibmr
.lkey
= stag
;
757 if (insert_handle(rhp
, &rhp
->mmidr
, mhp
, mmid
))
760 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__
, mmid
, mhp
, stag
);
763 cxio_dereg_mem(&rhp
->rdev
, stag
, mhp
->attr
.pbl_size
,
775 static int iwch_set_page(struct ib_mr
*ibmr
, u64 addr
)
777 struct iwch_mr
*mhp
= to_iwch_mr(ibmr
);
779 if (unlikely(mhp
->npages
== mhp
->attr
.pbl_size
))
782 mhp
->pages
[mhp
->npages
++] = addr
;
787 static int iwch_map_mr_sg(struct ib_mr
*ibmr
, struct scatterlist
*sg
,
788 int sg_nents
, unsigned int *sg_offset
)
790 struct iwch_mr
*mhp
= to_iwch_mr(ibmr
);
794 return ib_sg_to_pages(ibmr
, sg
, sg_nents
, sg_offset
, iwch_set_page
);
797 static int iwch_destroy_qp(struct ib_qp
*ib_qp
)
799 struct iwch_dev
*rhp
;
801 struct iwch_qp_attributes attrs
;
802 struct iwch_ucontext
*ucontext
;
804 qhp
= to_iwch_qp(ib_qp
);
807 attrs
.next_state
= IWCH_QP_STATE_ERROR
;
808 iwch_modify_qp(rhp
, qhp
, IWCH_QP_ATTR_NEXT_STATE
, &attrs
, 0);
809 wait_event(qhp
->wait
, !qhp
->ep
);
811 remove_handle(rhp
, &rhp
->qpidr
, qhp
->wq
.qpid
);
813 atomic_dec(&qhp
->refcnt
);
814 wait_event(qhp
->wait
, !atomic_read(&qhp
->refcnt
));
816 ucontext
= ib_qp
->uobject
? to_iwch_ucontext(ib_qp
->uobject
->context
)
818 cxio_destroy_qp(&rhp
->rdev
, &qhp
->wq
,
819 ucontext
? &ucontext
->uctx
: &rhp
->rdev
.uctx
);
821 PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__
,
822 ib_qp
, qhp
->wq
.qpid
, qhp
);
827 static struct ib_qp
*iwch_create_qp(struct ib_pd
*pd
,
828 struct ib_qp_init_attr
*attrs
,
829 struct ib_udata
*udata
)
831 struct iwch_dev
*rhp
;
834 struct iwch_cq
*schp
;
835 struct iwch_cq
*rchp
;
836 struct iwch_create_qp_resp uresp
;
837 int wqsize
, sqsize
, rqsize
;
838 struct iwch_ucontext
*ucontext
;
840 PDBG("%s ib_pd %p\n", __func__
, pd
);
841 if (attrs
->qp_type
!= IB_QPT_RC
)
842 return ERR_PTR(-EINVAL
);
843 php
= to_iwch_pd(pd
);
845 schp
= get_chp(rhp
, ((struct iwch_cq
*) attrs
->send_cq
)->cq
.cqid
);
846 rchp
= get_chp(rhp
, ((struct iwch_cq
*) attrs
->recv_cq
)->cq
.cqid
);
848 return ERR_PTR(-EINVAL
);
850 /* The RQT size must be # of entries + 1 rounded up to a power of two */
851 rqsize
= roundup_pow_of_two(attrs
->cap
.max_recv_wr
);
852 if (rqsize
== attrs
->cap
.max_recv_wr
)
853 rqsize
= roundup_pow_of_two(attrs
->cap
.max_recv_wr
+1);
855 /* T3 doesn't support RQT depth < 16 */
859 if (rqsize
> T3_MAX_RQ_SIZE
)
860 return ERR_PTR(-EINVAL
);
862 if (attrs
->cap
.max_inline_data
> T3_MAX_INLINE
)
863 return ERR_PTR(-EINVAL
);
866 * NOTE: The SQ and total WQ sizes don't need to be
867 * a power of two. However, all the code assumes
868 * they are. EG: Q_FREECNT() and friends.
870 sqsize
= roundup_pow_of_two(attrs
->cap
.max_send_wr
);
871 wqsize
= roundup_pow_of_two(rqsize
+ sqsize
);
874 * Kernel users need more wq space for fastreg WRs which can take
877 ucontext
= pd
->uobject
? to_iwch_ucontext(pd
->uobject
->context
) : NULL
;
878 if (!ucontext
&& wqsize
< (rqsize
+ (2 * sqsize
)))
879 wqsize
= roundup_pow_of_two(rqsize
+
880 roundup_pow_of_two(attrs
->cap
.max_send_wr
* 2));
881 PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__
,
882 wqsize
, sqsize
, rqsize
);
883 qhp
= kzalloc(sizeof(*qhp
), GFP_KERNEL
);
885 return ERR_PTR(-ENOMEM
);
886 qhp
->wq
.size_log2
= ilog2(wqsize
);
887 qhp
->wq
.rq_size_log2
= ilog2(rqsize
);
888 qhp
->wq
.sq_size_log2
= ilog2(sqsize
);
889 if (cxio_create_qp(&rhp
->rdev
, !udata
, &qhp
->wq
,
890 ucontext
? &ucontext
->uctx
: &rhp
->rdev
.uctx
)) {
892 return ERR_PTR(-ENOMEM
);
895 attrs
->cap
.max_recv_wr
= rqsize
- 1;
896 attrs
->cap
.max_send_wr
= sqsize
;
897 attrs
->cap
.max_inline_data
= T3_MAX_INLINE
;
900 qhp
->attr
.pd
= php
->pdid
;
901 qhp
->attr
.scq
= ((struct iwch_cq
*) attrs
->send_cq
)->cq
.cqid
;
902 qhp
->attr
.rcq
= ((struct iwch_cq
*) attrs
->recv_cq
)->cq
.cqid
;
903 qhp
->attr
.sq_num_entries
= attrs
->cap
.max_send_wr
;
904 qhp
->attr
.rq_num_entries
= attrs
->cap
.max_recv_wr
;
905 qhp
->attr
.sq_max_sges
= attrs
->cap
.max_send_sge
;
906 qhp
->attr
.sq_max_sges_rdma_write
= attrs
->cap
.max_send_sge
;
907 qhp
->attr
.rq_max_sges
= attrs
->cap
.max_recv_sge
;
908 qhp
->attr
.state
= IWCH_QP_STATE_IDLE
;
909 qhp
->attr
.next_state
= IWCH_QP_STATE_IDLE
;
912 * XXX - These don't get passed in from the openib user
913 * at create time. The CM sets them via a QP modify.
914 * Need to fix... I think the CM should
916 qhp
->attr
.enable_rdma_read
= 1;
917 qhp
->attr
.enable_rdma_write
= 1;
918 qhp
->attr
.enable_bind
= 1;
919 qhp
->attr
.max_ord
= 1;
920 qhp
->attr
.max_ird
= 1;
922 spin_lock_init(&qhp
->lock
);
923 init_waitqueue_head(&qhp
->wait
);
924 atomic_set(&qhp
->refcnt
, 1);
926 if (insert_handle(rhp
, &rhp
->qpidr
, qhp
, qhp
->wq
.qpid
)) {
927 cxio_destroy_qp(&rhp
->rdev
, &qhp
->wq
,
928 ucontext
? &ucontext
->uctx
: &rhp
->rdev
.uctx
);
930 return ERR_PTR(-ENOMEM
);
935 struct iwch_mm_entry
*mm1
, *mm2
;
937 mm1
= kmalloc(sizeof *mm1
, GFP_KERNEL
);
939 iwch_destroy_qp(&qhp
->ibqp
);
940 return ERR_PTR(-ENOMEM
);
943 mm2
= kmalloc(sizeof *mm2
, GFP_KERNEL
);
946 iwch_destroy_qp(&qhp
->ibqp
);
947 return ERR_PTR(-ENOMEM
);
950 uresp
.qpid
= qhp
->wq
.qpid
;
951 uresp
.size_log2
= qhp
->wq
.size_log2
;
952 uresp
.sq_size_log2
= qhp
->wq
.sq_size_log2
;
953 uresp
.rq_size_log2
= qhp
->wq
.rq_size_log2
;
954 spin_lock(&ucontext
->mmap_lock
);
955 uresp
.key
= ucontext
->key
;
956 ucontext
->key
+= PAGE_SIZE
;
957 uresp
.db_key
= ucontext
->key
;
958 ucontext
->key
+= PAGE_SIZE
;
959 spin_unlock(&ucontext
->mmap_lock
);
960 if (ib_copy_to_udata(udata
, &uresp
, sizeof (uresp
))) {
963 iwch_destroy_qp(&qhp
->ibqp
);
964 return ERR_PTR(-EFAULT
);
966 mm1
->key
= uresp
.key
;
967 mm1
->addr
= virt_to_phys(qhp
->wq
.queue
);
968 mm1
->len
= PAGE_ALIGN(wqsize
* sizeof (union t3_wr
));
969 insert_mmap(ucontext
, mm1
);
970 mm2
->key
= uresp
.db_key
;
971 mm2
->addr
= qhp
->wq
.udb
& PAGE_MASK
;
972 mm2
->len
= PAGE_SIZE
;
973 insert_mmap(ucontext
, mm2
);
975 qhp
->ibqp
.qp_num
= qhp
->wq
.qpid
;
976 init_timer(&(qhp
->timer
));
977 PDBG("%s sq_num_entries %d, rq_num_entries %d "
978 "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
979 __func__
, qhp
->attr
.sq_num_entries
, qhp
->attr
.rq_num_entries
,
980 qhp
->wq
.qpid
, qhp
, (unsigned long long) qhp
->wq
.dma_addr
,
981 1 << qhp
->wq
.size_log2
, qhp
->wq
.rq_addr
);
985 static int iwch_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
986 int attr_mask
, struct ib_udata
*udata
)
988 struct iwch_dev
*rhp
;
990 enum iwch_qp_attr_mask mask
= 0;
991 struct iwch_qp_attributes attrs
;
993 PDBG("%s ib_qp %p\n", __func__
, ibqp
);
995 /* iwarp does not support the RTR state */
996 if ((attr_mask
& IB_QP_STATE
) && (attr
->qp_state
== IB_QPS_RTR
))
997 attr_mask
&= ~IB_QP_STATE
;
999 /* Make sure we still have something left to do */
1003 memset(&attrs
, 0, sizeof attrs
);
1004 qhp
= to_iwch_qp(ibqp
);
1007 attrs
.next_state
= iwch_convert_state(attr
->qp_state
);
1008 attrs
.enable_rdma_read
= (attr
->qp_access_flags
&
1009 IB_ACCESS_REMOTE_READ
) ? 1 : 0;
1010 attrs
.enable_rdma_write
= (attr
->qp_access_flags
&
1011 IB_ACCESS_REMOTE_WRITE
) ? 1 : 0;
1012 attrs
.enable_bind
= (attr
->qp_access_flags
& IB_ACCESS_MW_BIND
) ? 1 : 0;
1015 mask
|= (attr_mask
& IB_QP_STATE
) ? IWCH_QP_ATTR_NEXT_STATE
: 0;
1016 mask
|= (attr_mask
& IB_QP_ACCESS_FLAGS
) ?
1017 (IWCH_QP_ATTR_ENABLE_RDMA_READ
|
1018 IWCH_QP_ATTR_ENABLE_RDMA_WRITE
|
1019 IWCH_QP_ATTR_ENABLE_RDMA_BIND
) : 0;
1021 return iwch_modify_qp(rhp
, qhp
, mask
, &attrs
, 0);
1024 void iwch_qp_add_ref(struct ib_qp
*qp
)
1026 PDBG("%s ib_qp %p\n", __func__
, qp
);
1027 atomic_inc(&(to_iwch_qp(qp
)->refcnt
));
1030 void iwch_qp_rem_ref(struct ib_qp
*qp
)
1032 PDBG("%s ib_qp %p\n", __func__
, qp
);
1033 if (atomic_dec_and_test(&(to_iwch_qp(qp
)->refcnt
)))
1034 wake_up(&(to_iwch_qp(qp
)->wait
));
/* Look up a QP by number in the device's qpid table. */
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
1044 static int iwch_query_pkey(struct ib_device
*ibdev
,
1045 u8 port
, u16 index
, u16
* pkey
)
1047 PDBG("%s ibdev %p\n", __func__
, ibdev
);
1052 static int iwch_query_gid(struct ib_device
*ibdev
, u8 port
,
1053 int index
, union ib_gid
*gid
)
1055 struct iwch_dev
*dev
;
1057 PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
1058 __func__
, ibdev
, port
, index
, gid
);
1059 dev
= to_iwch_dev(ibdev
);
1060 BUG_ON(port
== 0 || port
> 2);
1061 memset(&(gid
->raw
[0]), 0, sizeof(gid
->raw
));
1062 memcpy(&(gid
->raw
[0]), dev
->rdev
.port_info
.lldevs
[port
-1]->dev_addr
, 6);
1066 static u64
fw_vers_string_to_u64(struct iwch_dev
*iwch_dev
)
1068 struct ethtool_drvinfo info
;
1069 struct net_device
*lldev
= iwch_dev
->rdev
.t3cdev_p
->lldev
;
1071 unsigned fw_maj
, fw_min
, fw_mic
;
1073 lldev
->ethtool_ops
->get_drvinfo(lldev
, &info
);
1075 next
= info
.fw_version
+ 1;
1076 cp
= strsep(&next
, ".");
1077 sscanf(cp
, "%i", &fw_maj
);
1078 cp
= strsep(&next
, ".");
1079 sscanf(cp
, "%i", &fw_min
);
1080 cp
= strsep(&next
, ".");
1081 sscanf(cp
, "%i", &fw_mic
);
1083 return (((u64
)fw_maj
& 0xffff) << 32) | ((fw_min
& 0xffff) << 16) |
1087 static int iwch_query_device(struct ib_device
*ibdev
, struct ib_device_attr
*props
,
1088 struct ib_udata
*uhw
)
1091 struct iwch_dev
*dev
;
1093 PDBG("%s ibdev %p\n", __func__
, ibdev
);
1095 if (uhw
->inlen
|| uhw
->outlen
)
1098 dev
= to_iwch_dev(ibdev
);
1099 memset(props
, 0, sizeof *props
);
1100 memcpy(&props
->sys_image_guid
, dev
->rdev
.t3cdev_p
->lldev
->dev_addr
, 6);
1101 props
->hw_ver
= dev
->rdev
.t3cdev_p
->type
;
1102 props
->fw_ver
= fw_vers_string_to_u64(dev
);
1103 props
->device_cap_flags
= dev
->device_cap_flags
;
1104 props
->page_size_cap
= dev
->attr
.mem_pgsizes_bitmask
;
1105 props
->vendor_id
= (u32
)dev
->rdev
.rnic_info
.pdev
->vendor
;
1106 props
->vendor_part_id
= (u32
)dev
->rdev
.rnic_info
.pdev
->device
;
1107 props
->max_mr_size
= dev
->attr
.max_mr_size
;
1108 props
->max_qp
= dev
->attr
.max_qps
;
1109 props
->max_qp_wr
= dev
->attr
.max_wrs
;
1110 props
->max_sge
= dev
->attr
.max_sge_per_wr
;
1111 props
->max_sge_rd
= 1;
1112 props
->max_qp_rd_atom
= dev
->attr
.max_rdma_reads_per_qp
;
1113 props
->max_qp_init_rd_atom
= dev
->attr
.max_rdma_reads_per_qp
;
1114 props
->max_cq
= dev
->attr
.max_cqs
;
1115 props
->max_cqe
= dev
->attr
.max_cqes_per_cq
;
1116 props
->max_mr
= dev
->attr
.max_mem_regs
;
1117 props
->max_pd
= dev
->attr
.max_pds
;
1118 props
->local_ca_ack_delay
= 0;
1119 props
->max_fast_reg_page_list_len
= T3_MAX_FASTREG_DEPTH
;
1124 static int iwch_query_port(struct ib_device
*ibdev
,
1125 u8 port
, struct ib_port_attr
*props
)
1127 struct iwch_dev
*dev
;
1128 struct net_device
*netdev
;
1129 struct in_device
*inetdev
;
1131 PDBG("%s ibdev %p\n", __func__
, ibdev
);
1133 dev
= to_iwch_dev(ibdev
);
1134 netdev
= dev
->rdev
.port_info
.lldevs
[port
-1];
1136 memset(props
, 0, sizeof(struct ib_port_attr
));
1137 props
->max_mtu
= IB_MTU_4096
;
1138 props
->active_mtu
= ib_mtu_int_to_enum(netdev
->mtu
);
1140 if (!netif_carrier_ok(netdev
))
1141 props
->state
= IB_PORT_DOWN
;
1143 inetdev
= in_dev_get(netdev
);
1145 if (inetdev
->ifa_list
)
1146 props
->state
= IB_PORT_ACTIVE
;
1148 props
->state
= IB_PORT_INIT
;
1149 in_dev_put(inetdev
);
1151 props
->state
= IB_PORT_INIT
;
1154 props
->port_cap_flags
=
1156 IB_PORT_SNMP_TUNNEL_SUP
|
1157 IB_PORT_REINIT_SUP
|
1158 IB_PORT_DEVICE_MGMT_SUP
|
1159 IB_PORT_VENDOR_CLASS_SUP
| IB_PORT_BOOT_MGMT_SUP
;
1160 props
->gid_tbl_len
= 1;
1161 props
->pkey_tbl_len
= 1;
1162 props
->active_width
= 2;
1163 props
->active_speed
= IB_SPEED_DDR
;
1164 props
->max_msg_sz
= -1;
1169 static ssize_t
show_rev(struct device
*dev
, struct device_attribute
*attr
,
1172 struct iwch_dev
*iwch_dev
= container_of(dev
, struct iwch_dev
,
1174 PDBG("%s dev 0x%p\n", __func__
, dev
);
1175 return sprintf(buf
, "%d\n", iwch_dev
->rdev
.t3cdev_p
->type
);
1178 static ssize_t
show_hca(struct device
*dev
, struct device_attribute
*attr
,
1181 struct iwch_dev
*iwch_dev
= container_of(dev
, struct iwch_dev
,
1183 struct ethtool_drvinfo info
;
1184 struct net_device
*lldev
= iwch_dev
->rdev
.t3cdev_p
->lldev
;
1186 PDBG("%s dev 0x%p\n", __func__
, dev
);
1187 lldev
->ethtool_ops
->get_drvinfo(lldev
, &info
);
1188 return sprintf(buf
, "%s\n", info
.driver
);
1191 static ssize_t
show_board(struct device
*dev
, struct device_attribute
*attr
,
1194 struct iwch_dev
*iwch_dev
= container_of(dev
, struct iwch_dev
,
1196 PDBG("%s dev 0x%p\n", __func__
, dev
);
1197 return sprintf(buf
, "%x.%x\n", iwch_dev
->rdev
.rnic_info
.pdev
->vendor
,
1198 iwch_dev
->rdev
.rnic_info
.pdev
->device
);
1230 static const char * const names
[] = {
1231 [IPINRECEIVES
] = "ipInReceives",
1232 [IPINHDRERRORS
] = "ipInHdrErrors",
1233 [IPINADDRERRORS
] = "ipInAddrErrors",
1234 [IPINUNKNOWNPROTOS
] = "ipInUnknownProtos",
1235 [IPINDISCARDS
] = "ipInDiscards",
1236 [IPINDELIVERS
] = "ipInDelivers",
1237 [IPOUTREQUESTS
] = "ipOutRequests",
1238 [IPOUTDISCARDS
] = "ipOutDiscards",
1239 [IPOUTNOROUTES
] = "ipOutNoRoutes",
1240 [IPREASMTIMEOUT
] = "ipReasmTimeout",
1241 [IPREASMREQDS
] = "ipReasmReqds",
1242 [IPREASMOKS
] = "ipReasmOKs",
1243 [IPREASMFAILS
] = "ipReasmFails",
1244 [TCPACTIVEOPENS
] = "tcpActiveOpens",
1245 [TCPPASSIVEOPENS
] = "tcpPassiveOpens",
1246 [TCPATTEMPTFAILS
] = "tcpAttemptFails",
1247 [TCPESTABRESETS
] = "tcpEstabResets",
1248 [TCPCURRESTAB
] = "tcpCurrEstab",
1249 [TCPINSEGS
] = "tcpInSegs",
1250 [TCPOUTSEGS
] = "tcpOutSegs",
1251 [TCPRETRANSSEGS
] = "tcpRetransSegs",
1252 [TCPINERRS
] = "tcpInErrs",
1253 [TCPOUTRSTS
] = "tcpOutRsts",
1254 [TCPRTOMIN
] = "tcpRtoMin",
1255 [TCPRTOMAX
] = "tcpRtoMax",
1258 static struct rdma_hw_stats
*iwch_alloc_stats(struct ib_device
*ibdev
,
1261 BUILD_BUG_ON(ARRAY_SIZE(names
) != NR_COUNTERS
);
1263 /* Our driver only supports device level stats */
1267 return rdma_alloc_hw_stats_struct(names
, NR_COUNTERS
,
1268 RDMA_HW_STATS_DEFAULT_LIFESPAN
);
1271 static int iwch_get_mib(struct ib_device
*ibdev
, struct rdma_hw_stats
*stats
,
1274 struct iwch_dev
*dev
;
1275 struct tp_mib_stats m
;
1278 if (port
!= 0 || !stats
)
1281 PDBG("%s ibdev %p\n", __func__
, ibdev
);
1282 dev
= to_iwch_dev(ibdev
);
1283 ret
= dev
->rdev
.t3cdev_p
->ctl(dev
->rdev
.t3cdev_p
, RDMA_GET_MIB
, &m
);
1287 stats
->value
[IPINRECEIVES
] = ((u64
)m
.ipInReceive_hi
<< 32) + m
.ipInReceive_lo
;
1288 stats
->value
[IPINHDRERRORS
] = ((u64
)m
.ipInHdrErrors_hi
<< 32) + m
.ipInHdrErrors_lo
;
1289 stats
->value
[IPINADDRERRORS
] = ((u64
)m
.ipInAddrErrors_hi
<< 32) + m
.ipInAddrErrors_lo
;
1290 stats
->value
[IPINUNKNOWNPROTOS
] = ((u64
)m
.ipInUnknownProtos_hi
<< 32) + m
.ipInUnknownProtos_lo
;
1291 stats
->value
[IPINDISCARDS
] = ((u64
)m
.ipInDiscards_hi
<< 32) + m
.ipInDiscards_lo
;
1292 stats
->value
[IPINDELIVERS
] = ((u64
)m
.ipInDelivers_hi
<< 32) + m
.ipInDelivers_lo
;
1293 stats
->value
[IPOUTREQUESTS
] = ((u64
)m
.ipOutRequests_hi
<< 32) + m
.ipOutRequests_lo
;
1294 stats
->value
[IPOUTDISCARDS
] = ((u64
)m
.ipOutDiscards_hi
<< 32) + m
.ipOutDiscards_lo
;
1295 stats
->value
[IPOUTNOROUTES
] = ((u64
)m
.ipOutNoRoutes_hi
<< 32) + m
.ipOutNoRoutes_lo
;
1296 stats
->value
[IPREASMTIMEOUT
] = m
.ipReasmTimeout
;
1297 stats
->value
[IPREASMREQDS
] = m
.ipReasmReqds
;
1298 stats
->value
[IPREASMOKS
] = m
.ipReasmOKs
;
1299 stats
->value
[IPREASMFAILS
] = m
.ipReasmFails
;
1300 stats
->value
[TCPACTIVEOPENS
] = m
.tcpActiveOpens
;
1301 stats
->value
[TCPPASSIVEOPENS
] = m
.tcpPassiveOpens
;
1302 stats
->value
[TCPATTEMPTFAILS
] = m
.tcpAttemptFails
;
1303 stats
->value
[TCPESTABRESETS
] = m
.tcpEstabResets
;
1304 stats
->value
[TCPCURRESTAB
] = m
.tcpOutRsts
;
1305 stats
->value
[TCPINSEGS
] = m
.tcpCurrEstab
;
1306 stats
->value
[TCPOUTSEGS
] = ((u64
)m
.tcpInSegs_hi
<< 32) + m
.tcpInSegs_lo
;
1307 stats
->value
[TCPRETRANSSEGS
] = ((u64
)m
.tcpOutSegs_hi
<< 32) + m
.tcpOutSegs_lo
;
1308 stats
->value
[TCPINERRS
] = ((u64
)m
.tcpRetransSeg_hi
<< 32) + m
.tcpRetransSeg_lo
,
1309 stats
->value
[TCPOUTRSTS
] = ((u64
)m
.tcpInErrs_hi
<< 32) + m
.tcpInErrs_lo
;
1310 stats
->value
[TCPRTOMIN
] = m
.tcpRtoMin
;
1311 stats
->value
[TCPRTOMAX
] = m
.tcpRtoMax
;
1313 return stats
->num_counters
;
/* Read-only sysfs attributes exposing adapter identification. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
1320 static struct device_attribute
*iwch_class_attributes
[] = {
1326 static int iwch_port_immutable(struct ib_device
*ibdev
, u8 port_num
,
1327 struct ib_port_immutable
*immutable
)
1329 struct ib_port_attr attr
;
1332 err
= iwch_query_port(ibdev
, port_num
, &attr
);
1336 immutable
->pkey_tbl_len
= attr
.pkey_tbl_len
;
1337 immutable
->gid_tbl_len
= attr
.gid_tbl_len
;
1338 immutable
->core_cap_flags
= RDMA_CORE_PORT_IWARP
;
1343 static void get_dev_fw_ver_str(struct ib_device
*ibdev
, char *str
,
1346 struct iwch_dev
*iwch_dev
= to_iwch_dev(ibdev
);
1347 struct ethtool_drvinfo info
;
1348 struct net_device
*lldev
= iwch_dev
->rdev
.t3cdev_p
->lldev
;
1350 PDBG("%s dev 0x%p\n", __func__
, iwch_dev
);
1351 lldev
->ethtool_ops
->get_drvinfo(lldev
, &info
);
1352 snprintf(str
, str_len
, "%s", info
.fw_version
);
1355 int iwch_register_device(struct iwch_dev
*dev
)
1360 PDBG("%s iwch_dev %p\n", __func__
, dev
);
1361 strlcpy(dev
->ibdev
.name
, "cxgb3_%d", IB_DEVICE_NAME_MAX
);
1362 memset(&dev
->ibdev
.node_guid
, 0, sizeof(dev
->ibdev
.node_guid
));
1363 memcpy(&dev
->ibdev
.node_guid
, dev
->rdev
.t3cdev_p
->lldev
->dev_addr
, 6);
1364 dev
->ibdev
.owner
= THIS_MODULE
;
1365 dev
->device_cap_flags
= IB_DEVICE_LOCAL_DMA_LKEY
|
1366 IB_DEVICE_MEM_WINDOW
|
1367 IB_DEVICE_MEM_MGT_EXTENSIONS
;
1369 /* cxgb3 supports STag 0. */
1370 dev
->ibdev
.local_dma_lkey
= 0;
1372 dev
->ibdev
.uverbs_cmd_mask
=
1373 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT
) |
1374 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE
) |
1375 (1ull << IB_USER_VERBS_CMD_QUERY_PORT
) |
1376 (1ull << IB_USER_VERBS_CMD_ALLOC_PD
) |
1377 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD
) |
1378 (1ull << IB_USER_VERBS_CMD_REG_MR
) |
1379 (1ull << IB_USER_VERBS_CMD_DEREG_MR
) |
1380 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL
) |
1381 (1ull << IB_USER_VERBS_CMD_CREATE_CQ
) |
1382 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ
) |
1383 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ
) |
1384 (1ull << IB_USER_VERBS_CMD_CREATE_QP
) |
1385 (1ull << IB_USER_VERBS_CMD_MODIFY_QP
) |
1386 (1ull << IB_USER_VERBS_CMD_POLL_CQ
) |
1387 (1ull << IB_USER_VERBS_CMD_DESTROY_QP
) |
1388 (1ull << IB_USER_VERBS_CMD_POST_SEND
) |
1389 (1ull << IB_USER_VERBS_CMD_POST_RECV
);
1390 dev
->ibdev
.node_type
= RDMA_NODE_RNIC
;
1391 BUILD_BUG_ON(sizeof(IWCH_NODE_DESC
) > IB_DEVICE_NODE_DESC_MAX
);
1392 memcpy(dev
->ibdev
.node_desc
, IWCH_NODE_DESC
, sizeof(IWCH_NODE_DESC
));
1393 dev
->ibdev
.phys_port_cnt
= dev
->rdev
.port_info
.nports
;
1394 dev
->ibdev
.num_comp_vectors
= 1;
1395 dev
->ibdev
.dma_device
= &(dev
->rdev
.rnic_info
.pdev
->dev
);
1396 dev
->ibdev
.query_device
= iwch_query_device
;
1397 dev
->ibdev
.query_port
= iwch_query_port
;
1398 dev
->ibdev
.query_pkey
= iwch_query_pkey
;
1399 dev
->ibdev
.query_gid
= iwch_query_gid
;
1400 dev
->ibdev
.alloc_ucontext
= iwch_alloc_ucontext
;
1401 dev
->ibdev
.dealloc_ucontext
= iwch_dealloc_ucontext
;
1402 dev
->ibdev
.mmap
= iwch_mmap
;
1403 dev
->ibdev
.alloc_pd
= iwch_allocate_pd
;
1404 dev
->ibdev
.dealloc_pd
= iwch_deallocate_pd
;
1405 dev
->ibdev
.create_ah
= iwch_ah_create
;
1406 dev
->ibdev
.destroy_ah
= iwch_ah_destroy
;
1407 dev
->ibdev
.create_qp
= iwch_create_qp
;
1408 dev
->ibdev
.modify_qp
= iwch_ib_modify_qp
;
1409 dev
->ibdev
.destroy_qp
= iwch_destroy_qp
;
1410 dev
->ibdev
.create_cq
= iwch_create_cq
;
1411 dev
->ibdev
.destroy_cq
= iwch_destroy_cq
;
1412 dev
->ibdev
.resize_cq
= iwch_resize_cq
;
1413 dev
->ibdev
.poll_cq
= iwch_poll_cq
;
1414 dev
->ibdev
.get_dma_mr
= iwch_get_dma_mr
;
1415 dev
->ibdev
.reg_user_mr
= iwch_reg_user_mr
;
1416 dev
->ibdev
.dereg_mr
= iwch_dereg_mr
;
1417 dev
->ibdev
.alloc_mw
= iwch_alloc_mw
;
1418 dev
->ibdev
.dealloc_mw
= iwch_dealloc_mw
;
1419 dev
->ibdev
.alloc_mr
= iwch_alloc_mr
;
1420 dev
->ibdev
.map_mr_sg
= iwch_map_mr_sg
;
1421 dev
->ibdev
.attach_mcast
= iwch_multicast_attach
;
1422 dev
->ibdev
.detach_mcast
= iwch_multicast_detach
;
1423 dev
->ibdev
.process_mad
= iwch_process_mad
;
1424 dev
->ibdev
.req_notify_cq
= iwch_arm_cq
;
1425 dev
->ibdev
.post_send
= iwch_post_send
;
1426 dev
->ibdev
.post_recv
= iwch_post_receive
;
1427 dev
->ibdev
.alloc_hw_stats
= iwch_alloc_stats
;
1428 dev
->ibdev
.get_hw_stats
= iwch_get_mib
;
1429 dev
->ibdev
.uverbs_abi_ver
= IWCH_UVERBS_ABI_VERSION
;
1430 dev
->ibdev
.get_port_immutable
= iwch_port_immutable
;
1431 dev
->ibdev
.get_dev_fw_str
= get_dev_fw_ver_str
;
1433 dev
->ibdev
.iwcm
= kmalloc(sizeof(struct iw_cm_verbs
), GFP_KERNEL
);
1434 if (!dev
->ibdev
.iwcm
)
1437 dev
->ibdev
.iwcm
->connect
= iwch_connect
;
1438 dev
->ibdev
.iwcm
->accept
= iwch_accept_cr
;
1439 dev
->ibdev
.iwcm
->reject
= iwch_reject_cr
;
1440 dev
->ibdev
.iwcm
->create_listen
= iwch_create_listen
;
1441 dev
->ibdev
.iwcm
->destroy_listen
= iwch_destroy_listen
;
1442 dev
->ibdev
.iwcm
->add_ref
= iwch_qp_add_ref
;
1443 dev
->ibdev
.iwcm
->rem_ref
= iwch_qp_rem_ref
;
1444 dev
->ibdev
.iwcm
->get_qp
= iwch_get_qp
;
1445 memcpy(dev
->ibdev
.iwcm
->ifname
, dev
->rdev
.t3cdev_p
->lldev
->name
,
1446 sizeof(dev
->ibdev
.iwcm
->ifname
));
1448 ret
= ib_register_device(&dev
->ibdev
, NULL
);
1452 for (i
= 0; i
< ARRAY_SIZE(iwch_class_attributes
); ++i
) {
1453 ret
= device_create_file(&dev
->ibdev
.dev
,
1454 iwch_class_attributes
[i
]);
1461 ib_unregister_device(&dev
->ibdev
);
1463 kfree(dev
->ibdev
.iwcm
);
1467 void iwch_unregister_device(struct iwch_dev
*dev
)
1471 PDBG("%s iwch_dev %p\n", __func__
, dev
);
1472 for (i
= 0; i
< ARRAY_SIZE(iwch_class_attributes
); ++i
)
1473 device_remove_file(&dev
->ibdev
.dev
,
1474 iwch_class_attributes
[i
]);
1475 ib_unregister_device(&dev
->ibdev
);
1476 kfree(dev
->ibdev
.iwcm
);