2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #ifndef __IWCH_PROVIDER_H__
34 #define __IWCH_PROVIDER_H__
36 #include <linux/list.h>
37 #include <linux/spinlock.h>
38 #include <rdma/ib_verbs.h>
39 #include <asm/types.h>
51 static inline struct iwch_pd
*to_iwch_pd(struct ib_pd
*ibpd
)
53 return container_of(ibpd
, struct iwch_pd
, ibpd
);
56 struct tpt_attributes
{
61 enum tpt_mem_perm perms
;
62 u32 remote_invaliate_disable
:1;
79 struct tpt_attributes attr
;
/* Alias used where code passes a memory-window object around as a handle. */
typedef struct iwch_mw iwch_mw_handle;
84 static inline struct iwch_mr
*to_iwch_mr(struct ib_mr
*ibmr
)
86 return container_of(ibmr
, struct iwch_mr
, ibmr
);
93 struct tpt_attributes attr
;
96 static inline struct iwch_mw
*to_iwch_mw(struct ib_mw
*ibmw
)
98 return container_of(ibmw
, struct iwch_mw
, ibmw
);
103 struct iwch_dev
*rhp
;
107 wait_queue_head_t wait
;
108 u32 __user
*user_rptr_addr
;
111 static inline struct iwch_cq
*to_iwch_cq(struct ib_cq
*ibcq
)
113 return container_of(ibcq
, struct iwch_cq
, ibcq
);
120 struct iwch_mpa_attributes
{
121 u8 recv_marker_enabled
;
122 u8 xmit_marker_enabled
; /* iWARP: enable inbound Read Resp. */
124 u8 version
; /* 0 or 1 */
127 struct iwch_qp_attributes
{
133 u32 sq_max_sges_rdma_write
;
137 u8 enable_rdma_write
; /* enable inbound Read Resp. */
139 u8 enable_mmid0_fastreg
; /* Enable STAG0 + Fast-register */
141 * Next QP state. If specify the current state, only the
142 * QP attributes will be modified.
148 char terminate_buffer
[52];
149 u32 terminate_msg_len
;
150 u8 is_terminate_local
;
151 struct iwch_mpa_attributes mpa_attr
; /* IN-OUT */
152 struct iwch_ep
*llp_stream_handle
;
153 char *stream_msg_buf
; /* Last stream msg. before Idle -> RTS */
154 u32 stream_msg_buf_len
; /* Only on Idle -> RTS */
159 struct iwch_dev
*rhp
;
161 struct iwch_qp_attributes attr
;
165 wait_queue_head_t wait
;
166 enum IWCH_QP_FLAGS flags
;
167 struct timer_list timer
;
170 static inline int qp_quiesced(struct iwch_qp
*qhp
)
172 return qhp
->flags
& QP_QUIESCED
;
175 static inline struct iwch_qp
*to_iwch_qp(struct ib_qp
*ibqp
)
177 return container_of(ibqp
, struct iwch_qp
, ibqp
);
/* QP reference-count hooks handed to the RDMA core. */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
/* Look up a QP on the device by QP number. */
struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
184 struct iwch_ucontext
{
185 struct ib_ucontext ibucontext
;
186 struct cxio_ucontext uctx
;
188 spinlock_t mmap_lock
;
189 struct list_head mmaps
;
192 static inline struct iwch_ucontext
*to_iwch_ucontext(struct ib_ucontext
*c
)
194 return container_of(c
, struct iwch_ucontext
, ibucontext
);
197 struct iwch_mm_entry
{
198 struct list_head entry
;
204 static inline struct iwch_mm_entry
*remove_mmap(struct iwch_ucontext
*ucontext
,
205 u32 key
, unsigned len
)
207 struct list_head
*pos
, *nxt
;
208 struct iwch_mm_entry
*mm
;
210 spin_lock(&ucontext
->mmap_lock
);
211 list_for_each_safe(pos
, nxt
, &ucontext
->mmaps
) {
213 mm
= list_entry(pos
, struct iwch_mm_entry
, entry
);
214 if (mm
->key
== key
&& mm
->len
== len
) {
215 list_del_init(&mm
->entry
);
216 spin_unlock(&ucontext
->mmap_lock
);
217 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__
,
218 key
, (unsigned long long) mm
->addr
, mm
->len
);
222 spin_unlock(&ucontext
->mmap_lock
);
226 static inline void insert_mmap(struct iwch_ucontext
*ucontext
,
227 struct iwch_mm_entry
*mm
)
229 spin_lock(&ucontext
->mmap_lock
);
230 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__
,
231 mm
->key
, (unsigned long long) mm
->addr
, mm
->len
);
232 list_add_tail(&mm
->entry
, &ucontext
->mmaps
);
233 spin_unlock(&ucontext
->mmap_lock
);
/* Bit mask selecting which iwch_qp_attributes fields a modify applies to. */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* Union of every attribute that may legally be modified together. */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
257 int iwch_modify_qp(struct iwch_dev
*rhp
,
259 enum iwch_qp_attr_mask mask
,
260 struct iwch_qp_attributes
*attrs
,
267 IWCH_QP_STATE_TERMINATE
,
268 IWCH_QP_STATE_CLOSING
,
272 static inline int iwch_convert_state(enum ib_qp_state ib_state
)
277 return IWCH_QP_STATE_IDLE
;
279 return IWCH_QP_STATE_RTS
;
281 return IWCH_QP_STATE_CLOSING
;
283 return IWCH_QP_STATE_TERMINATE
;
285 return IWCH_QP_STATE_ERROR
;
/* Chelsio TPT memory-access permission bits used when writing STag entries. */
enum iwch_mem_perms {
	IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
	IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
	IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
	IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
	IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
	IWCH_MEM_ACCESS_BINDING = 1 << 5,
	/* Convenience masks combining the individual bits. */
	IWCH_MEM_ACCESS_LOCAL =
	    (IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
	IWCH_MEM_ACCESS_REMOTE =
	    (IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
	    /* cannot go beyond 1 << 31 */
} __attribute__ ((packed));
305 static inline u32
iwch_convert_access(int acc
)
307 return (acc
& IB_ACCESS_REMOTE_WRITE
? IWCH_MEM_ACCESS_REMOTE_WRITE
: 0)
308 | (acc
& IB_ACCESS_REMOTE_READ
? IWCH_MEM_ACCESS_REMOTE_READ
: 0) |
309 (acc
& IB_ACCESS_LOCAL_WRITE
? IWCH_MEM_ACCESS_LOCAL_WRITE
: 0) |
310 (acc
& IB_ACCESS_MW_BIND
? IWCH_MEM_ACCESS_BINDING
: 0) |
311 IWCH_MEM_ACCESS_LOCAL_READ
;
/* Validity state of an STag / memory identifier. */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
/* Flags selecting how much QP state a query operation returns/affects. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */
	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/*
	 * NOTE(review): 0x32 overlaps the SUSPEND bit (0x2) and breaks the
	 * power-of-two pattern — looks like it may have been intended as
	 * 0x10 or 0x20; confirm before relying on it as a distinct flag.
	 */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
/*
 * Post chains of send/receive work requests.  Per the ib_verbs convention,
 * on failure *bad_wr is presumably set to the first WR that could not be
 * posted — confirm against the definitions.
 */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
337 int iwch_bind_mw(struct ib_qp
*qp
,
339 struct ib_mw_bind
*mw_bind
);
/* Poll up to num_entries completions from the CQ into the wc array. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Handle a terminate for qhp triggered by the given response-queue message. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Register/unregister the iwch device with the RDMA core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
/* Quiesce/resume the QPs associated with the given CQ. */
int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
347 int iwch_register_mem(struct iwch_dev
*rhp
, struct iwch_pd
*php
,
351 int iwch_reregister_mem(struct iwch_dev
*rhp
, struct iwch_pd
*php
,
356 int build_phys_page_list(struct ib_phys_buf
*buffer_list
,
365 #define IWCH_NODE_DESC "cxgb3 Chelsio Communications"