/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
#include <rdma/restrack.h>

#include "cxgb4_uld.h"

#include <rdma/cxgb4-abi.h>

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
	u32 start;              /* logical minimal id */
	u32 last;               /* hint for find */
	/* ... */
};
struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
	struct c4iw_id_table srq_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	/* ... */
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	/* ... */
};
enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1 << 0),
	T4_STATUS_PAGE_DISABLED = (1 << 1),
};

/* ... */

struct c4iw_stats {
	/* ... */
	struct c4iw_stat qid;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat srqt;
	struct c4iw_stat srq;
	struct c4iw_stat ocqp;
	/* ... */
	u64 db_state_transitions;
	u64 db_fc_interruptions;
	/* ... */
	u64 act_ofld_conn_fails;
	u64 pas_ofld_conn_fails;
	/* ... */
};
struct c4iw_hw_queue {
	int t4_eq_status_entries;
	/* ... */
};

struct wr_log_entry {
	ktime_t post_host_time;
	ktime_t poll_host_time;
	/* ... */
};
struct c4iw_rdev {
	struct c4iw_resource resource;
	/* ... */
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	/* ... */
	struct cxgb4_lld_info lldi;
	unsigned long bar2_pa;
	void __iomem *bar2_kva;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
	struct c4iw_hw_queue hw_queue;
	struct t4_dev_status_page *status_page;
	/* ... */
	struct wr_log_entry *wr_log;
	/* ... */
	struct workqueue_struct *free_workq;
	struct completion rqt_compl;
	struct completion pbl_compl;
	struct kref rqt_kref;
	struct kref pbl_kref;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return (int)(rdev->lldi.vr->stag.size >> 5);
}
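/*
 * Note on the shift above: the STag region size is divided by 32 (>> 5),
 * presumably because each STag consumes one 32-byte TPT (translation and
 * protection table) entry, so the result is the number of STags the
 * adapter can hold.
 */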
#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
	struct completion completion;
	int ret;
	struct kref kref;
};

void _c4iw_free_wr_wait(struct kref *kref);

static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
		 kref_read(&wr_waitp->kref));
	WARN_ON(kref_read(&wr_waitp->kref) == 0);
	kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
}

static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
		 kref_read(&wr_waitp->kref));
	WARN_ON(kref_read(&wr_waitp->kref) == 0);
	kref_get(&wr_waitp->kref);
}

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}
static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
				 bool deref)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
	if (deref)
		c4iw_put_wr_wait(wr_waitp);
}

static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
{
	_c4iw_wake_up(wr_waitp, ret, false);
}

static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
{
	_c4iw_wake_up(wr_waitp, ret, true);
}
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				      struct c4iw_wr_wait *wr_waitp,
				      u32 hwtid, u32 qpid,
				      const char *func)
{
	int ret;

	if (c4iw_fatal_error(rdev)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
	if (!ret) {
		pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
		       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
		rdev->flags |= T4_FATAL_ERROR;
		wr_waitp->ret = -EIO;
		goto out;
	}
	if (wr_waitp->ret)
		pr_debug("%s: FW reply %d tid %u qpid %u\n",
			 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
out:
	return wr_waitp->ret;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);

static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
				     struct sk_buff *skb,
				     struct c4iw_wr_wait *wr_waitp,
				     u32 hwtid, u32 qpid,
				     const char *func)
{
	int ret;

	pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
		 qpid);
	c4iw_get_wr_wait(wr_waitp);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		c4iw_put_wr_wait(wr_waitp);

	return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
}
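/*
 * Illustrative sketch (not part of this header) of how the wr_wait helpers
 * above are typically combined: the caller initializes the wait object,
 * fires a work request towards the firmware, and blocks until the reply
 * (or a timeout) wakes it up.  The skb construction is elided, and the
 * names "build_fw_request", "my_hwtid" and "my_qpid" are hypothetical.
 *
 *	c4iw_init_wr_wait(wr_waitp);
 *	skb = build_fw_request(...);		// hypothetical helper
 *	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp,
 *				 my_hwtid, my_qpid, __func__);
 *	// the reply path calls c4iw_wake_up_deref(wr_waitp, status)
 */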
struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	/* ... */
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	struct idr hwtid_idr;
	/* ... */
	struct list_head db_fc_list;
	/* ... */
	wait_queue_head_t wait;
};

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
	struct work_struct reg_work;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;

	if (lock) {
		idr_preload(GFP_KERNEL);
		spin_lock_irq(&rhp->lock);
	}

	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

	if (lock) {
		spin_unlock_irq(&rhp->lock);
		idr_preload_end();
	}

	return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}
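/*
 * Illustrative sketch (an assumption, not taken verbatim from this file):
 * the insert_handle()/remove_handle() wrappers above are how a newly
 * created object is made visible to the lookup helpers get_qhp(),
 * get_chp() and get_mhp().  "qhp" and "qpid" stand for a just-initialized
 * QP and its hardware id.
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qpid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qpid);
 *
 * The _nolock variants exist for callers that already hold rhp->lock.
 */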
extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
	return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}
struct c4iw_pd {
	struct ib_pd ibpd;
	/* ... */
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}
struct tpt_attributes {
	/* ... */
	enum fw_ri_mem_perms perms;
	/* ... */
	u32 remote_invaliate_disable:1;
	/* ... */
	u32 mw_bind_enable:1;
	/* ... */
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	struct sk_buff *dereg_skb;
	/* ... */
	struct tpt_attributes attr;
	/* ... */
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}
struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	struct sk_buff *dereg_skb;
	/* ... */
	struct tpt_attributes attr;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}
struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct sk_buff *destroy_skb;
	/* ... */
	spinlock_t comp_handler_lock;
	/* ... */
	wait_queue_head_t wait;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}
struct c4iw_mpa_attributes {
	/* ... */
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	/* ... */
	u8 enhanced_rdma_conn;
	/* ... */
};

struct c4iw_qp_attributes {
	/* ... */
	u32 sq_max_sges_rdma_write;
	/* ... */
	u8 enable_rdma_write;
	/* ... */
	u8 enable_mmid0_fastreg;
	/* ... */
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	/* ... */
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	/* ... */
	struct c4iw_qp_attributes attr;
	/* ... */
	wait_queue_head_t wait;
	/* ... */
	struct c4iw_srq *srq;
	struct work_struct free_work;
	struct c4iw_ucontext *ucontext;
	struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}
struct c4iw_srq {
	struct ib_srq ibsrq;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	/* ... */
	struct sk_buff *destroy_skb;
	/* ... */
	spinlock_t lock; /* protects srq */
	struct c4iw_wr_wait *wr_waitp;
	/* ... */
};

static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct c4iw_srq, ibsrq);
}
struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	/* ... */
	spinlock_t mmap_lock;
	struct list_head mmaps;
	struct kref kref;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

void _c4iw_free_ucontext(struct kref *kref);

static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
{
	kref_put(&ucontext->kref, _c4iw_free_ucontext);
}

static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
{
	kref_get(&ucontext->kref);
}
struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			pr_debug("key 0x%x addr 0x%llx len %d\n", key,
				 (unsigned long long)mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	pr_debug("key 0x%x addr 0x%llx len %d\n",
		 mm->key, (unsigned long long)mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
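/*
 * The two helpers above implement a small rendezvous table for mmap():
 * when a queue is created, the kernel picks a (key, length) cookie, stores
 * the backing address in a c4iw_mm_entry via insert_mmap(), and returns
 * the key to user space; the later mmap() call hands the key back as the
 * file offset, and remove_mmap() looks up (and consumes) the matching
 * entry.  Sketch of the consumer side, assuming a hypothetical lookup in
 * the mmap handler:
 *
 *	mm = remove_mmap(ucontext, key, len);
 *	if (!mm)
 *		return -EINVAL;
 *	// remap mm->addr into the vma, then kfree(mm)
 */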
enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	/* ... */
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	/* ... */
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}
static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}
static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
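/*
 * Example of the translation above: an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps to
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_READ |
 * FW_RI_MEM_ACCESS_LOCAL_READ; local read permission is always granted,
 * while the bind variant only carries the remote read/write bits.
 */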
enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF
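/*
 * A reading of the MPA v2 flags above (following the MPA enhanced
 * connection-setup convention, not stated explicitly in this header):
 * the values overlap numerically (0x8000/0x4000) because they qualify
 * different fields of the private data.  PEER2PEER_MODEL and
 * ZERO_LEN_FPDU_RTR are interpreted against the IRD field, while
 * RDMA_WRITE_RTR and RDMA_READ_RTR are interpreted against the ORD
 * field, with MPA_V2_IRD_ORD_MASK extracting the 14-bit count itself.
 */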
#define c4iw_put_ep(ep) {					\
	pr_debug("put_ep ep %p refcnt %d\n",			\
		 ep, kref_read(&((ep)->kref)));			\
	WARN_ON(kref_read(&((ep)->kref)) < 1);			\
	kref_put(&((ep)->kref), _c4iw_free_ep);			\
}

#define c4iw_get_ep(ep) {					\
	pr_debug("get_ep ep %p, refcnt %d\n",			\
		 ep, kref_read(&((ep)->kref)));			\
	kref_get(&((ep)->kref));				\
}

void _c4iw_free_ep(struct kref *kref);
struct mpa_message {
	/* ... */
	__be16 private_data_size;
	/* ... */
};

struct mpa_v2_conn_params {
	/* ... */
};

struct terminate_message {
	/* ... */
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
enum c4iw_layers_types {
	/* ... */
	RDMAP_LOCAL_CATA = 0x00,
	RDMAP_REMOTE_PROT = 0x01,
	RDMAP_REMOTE_OP = 0x02,
	DDP_LOCAL_CATA = 0x00,
	DDP_TAGGED_ERR = 0x01,
	DDP_UNTAGGED_ERR = 0x02,
	/* ... */
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG = 0x00,
	RDMAP_BASE_BOUNDS = 0x01,
	RDMAP_ACC_VIOL = 0x02,
	RDMAP_STAG_NOT_ASSOC = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_VERS = 0x05,
	RDMAP_INV_OPCODE = 0x06,
	RDMAP_STREAM_CATA = 0x07,
	RDMAP_GLOBAL_CATA = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG = 0x00,
	DDPT_BASE_BOUNDS = 0x01,
	DDPT_STAG_NOT_ASSOC = 0x02,
	/* ... */
	DDPT_INV_VERS = 0x04,
	/* ... */
	DDPU_INV_MSN_NOBUF = 0x02,
	DDPU_INV_MSN_RANGE = 0x03,
	/* ... */
	DDPU_MSG_TOOBIG = 0x05,
	/* ... */
};

enum c4iw_mpa_ecodes {
	/* ... */
	MPA_MARKER_ERR = 0x03,
	MPA_LOCAL_CATA = 0x05,
	MPA_INSUFF_IRD = 0x06,
	MPA_NOMATCH_RTR = 0x07,
	/* ... */
};

/* ... */

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS = 0,
	ABORT_REQ_IN_PROGRESS = 1,
	RELEASE_RESOURCES = 2,
	/* ... */
};

enum c4iw_ep_history {
	/* ... */
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21,
	/* ... */
};

enum conn_pre_alloc_buffers {
	/* ... */
	CN_CLOSE_CON_REQ_BUF,
	/* ... */
};

enum {
	FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
};
union cpl_wr_size {
	struct cpl_abort_req abrt_req;
	struct cpl_abort_rpl abrt_rpl;
	struct fw_ri_wr ri_req;
	struct cpl_close_con_req close_req;
	char flowc_buf[FLOWC_LEN];
};
struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	/* ... */
	struct c4iw_dev *dev;
	struct sk_buff_head ep_skb_list;
	enum c4iw_ep_state state;
	/* ... */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct c4iw_wr_wait *wr_waitp;
	/* ... */
	unsigned long history;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	/* ... */
};
struct c4iw_ep_stats {
	unsigned connect_neg_adv;
	unsigned abort_neg_adv;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	/* ... */
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	/* ... */
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
	unsigned int retry_count;
	/* ... */
	struct c4iw_ep_stats stats;
	/* ... */
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return infop->vr->ocq.size > 0;
#else
	return 0;
#endif
}
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);
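/*
 * Minimal usage sketch for the id table API above (an assumption based on
 * the declarations and the C4IW_ID_TABLE_F_* flags defined earlier, not
 * taken verbatim from the driver): a table is sized once at init time and
 * then hands out one id per resource.
 *
 *	struct c4iw_id_table tbl;
 *
 *	if (c4iw_id_table_alloc(&tbl, 0, nr_ids, 1, C4IW_ID_TABLE_F_RANDOM))
 *		goto err;
 *	id = c4iw_id_alloc(&tbl);	// returns an unused id
 *	...
 *	c4iw_id_free(&tbl, id);
 *	c4iw_id_table_free(&tbl);
 */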
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
		       u32 nr_pdid, u32 nr_srqt);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
			       u64 length, u64 virt, int acc,
			       struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata);
int c4iw_destroy_srq(struct ib_srq *ib_srq);
struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *attrs,
			       struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa);
int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);

typedef int c4iw_restrack_func(struct sk_buff *msg,
			       struct rdma_restrack_entry *res);
extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];

#endif