/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
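
/*
 * The verbs below are required by the ib_device interface but have no
 * iWARP equivalent (address handles, multicast, MADs, port modify), so
 * they are stubs that simply fail with -ENOSYS.
 */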
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}
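
/*
 * A user context tracks the mmap keys handed out for this process so
 * that iwch_mmap() can later translate an mmap offset back to the queue
 * or doorbell memory it names.
 */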
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
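
/*
 * Creating a CQ allocates the hardware queue and, for user CQs, hands a
 * per-process mmap key back so the queue memory can be mapped into the
 * caller's address space.  T3A parts get extra slop because the hardware
 * can insert additional CQEs on errors.
 */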
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof(struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
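
/*
 * Resizing copies the CQEs into a larger queue while all QPs that post
 * to this CQ are quiesced, then updates the hardware CQ context in
 * place; the old queue is destroyed afterward under the new cqid.
 */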
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
}
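
/*
 * Re-arming a user CQ first snapshots the ring read pointer that the
 * userspace library maintains, so the hardware is armed relative to
 * what the consumer has actually polled.
 */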
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
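
/*
 * mmap offsets are opaque keys handed out at create time.  The key is
 * looked up (and consumed) in the context's mmap list; doorbell pages
 * are mapped uncached and write-only, queue memory as ordinary RAM.
 */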
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
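
/*
 * Protection domains are just adapter PD ids; for user PDs the pdid is
 * copied back so the userspace library can reference it directly.
 */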
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
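
/*
 * Registering a physical region builds a page list from the caller's
 * buffer list, writes it into the adapter's PBL, then creates the TPT
 * entry describing the region.
 */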
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 *iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;

	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
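
/*
 * User MR registration pins the pages via ib_umem_get() and streams
 * their DMA addresses into the PBL one page-sized batch at a time, so
 * only a single scratch page is needed regardless of region size.
 */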
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}
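
/*
 * Memory windows are thin wrappers around an adapter stag; the actual
 * bind to a region happens later via a bind_mw work request.
 */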
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
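
/*
 * Fast-register MRs reserve a PBL and a stag up front; the page list
 * itself is supplied later in a fastreg work request.
 */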
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret) {
		iwch_free_pbl(mhp);
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}
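
/*
 * QP destroy moves the QP to ERROR, waits for the connection reference
 * (qhp->ep) and all soft references to drain, then frees the hardware
 * queue.
 */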
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
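
/*
 * Queue sizing: the RQ depth must be a power of two strictly greater
 * than the requested max_recv_wr (one slot is reserved), and kernel QPs
 * get extra SQ space because fastreg WRs can consume two WR slots.
 */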
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should.
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
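
/*
 * iWARP QPs move IDLE -> RTS directly; a request for RTR is dropped
 * from the mask rather than rejected so common verbs consumers keep
 * working.
 */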
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
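
/*
 * The firmware version is reported by the LLD as a "major.minor.micro"
 * string; pack it into the u64 format expected in
 * ib_device_attr.fw_ver.
 */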
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				 m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};
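
/*
 * Wire up the ib_device ops and the iWARP CM callbacks, register with
 * the core, then publish the sysfs attributes; everything is unwound
 * on failure.
 */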
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}