/*
 * drivers/infiniband/hw/cxgb3/iwch_provider.h
 *
 * RDMA/cxgb3: driver for the Chelsio T3 RNIC.
 */
1/*
2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef __IWCH_PROVIDER_H__
34#define __IWCH_PROVIDER_H__
35
36#include <linux/list.h>
37#include <linux/spinlock.h>
38#include <rdma/ib_verbs.h>
39#include <asm/types.h>
40#include "t3cdev.h"
41#include "iwch.h"
42#include "cxio_wr.h"
43#include "cxio_hal.h"
44
/* iWARP protection domain: wraps the core verbs PD with the HW PD id. */
struct iwch_pd {
	struct ib_pd ibpd;	/* embedded core verbs PD (container_of target) */
	u32 pdid;		/* protection-domain id used by the HW */
	struct iwch_dev *rhp;	/* back-pointer to the owning device */
};
50
51static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
52{
53 return container_of(ibpd, struct iwch_pd, ibpd);
54}
55
/*
 * Software shadow of a TPT (translation/protection table) entry that
 * describes a memory region or memory window.
 * NOTE(review): "remote_invaliate_disable" is a misspelling of
 * "invalidate"; kept verbatim because other files reference the field.
 */
struct tpt_attributes {
	u32 stag;			/* iWARP steering tag for this entry */
	u32 state:1;			/* valid/invalid - presumably enum iwch_mmid_state */
	u32 type:2;
	u32 rsvd:1;
	enum tpt_mem_perm perms;	/* access permissions */
	u32 remote_invaliate_disable:1;	/* sic: "invalidate" */
	u32 zbva:1;			/* zero-based VA mode - TODO confirm */
	u32 mw_bind_enable:1;
	u32 page_size:5;		/* presumably log2-encoded page size - confirm */

	u32 pdid;			/* owning protection domain id */
	u32 qpid;
	u32 pbl_addr;			/* physical buffer list address */
	u32 len;
	u64 va_fbo;			/* virtual address or first-byte offset */
	u32 pbl_size;
};
74
/* Memory region: core verbs MR plus the driver's TPT shadow. */
struct iwch_mr {
	struct ib_mr ibmr;		/* embedded core verbs MR (container_of target) */
	struct iwch_dev *rhp;		/* owning device */
	u64 kva;			/* kernel virtual address of the region - TODO confirm */
	struct tpt_attributes attr;	/* TPT entry shadow */
};

/* NOTE(review): typedef merely aliases the struct; kept for existing users. */
typedef struct iwch_mw iwch_mw_handle;
83
84static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
85{
86 return container_of(ibmr, struct iwch_mr, ibmr);
87}
88
/* Memory window: core verbs MW plus the driver's TPT shadow. */
struct iwch_mw {
	struct ib_mw ibmw;		/* embedded core verbs MW (container_of target) */
	struct iwch_dev *rhp;		/* owning device */
	u64 kva;
	struct tpt_attributes attr;	/* TPT entry shadow */
};
95
96static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
97{
98 return container_of(ibmw, struct iwch_mw, ibmw);
99}
100
/* Completion queue: core verbs CQ wrapping the low-level t3_cq. */
struct iwch_cq {
	struct ib_cq ibcq;		/* embedded core verbs CQ (container_of target) */
	struct iwch_dev *rhp;		/* owning device */
	struct t3_cq cq;		/* low-level HW CQ state */
	spinlock_t lock;		/* CQ lock - exact coverage defined in the .c files */
	atomic_t refcnt;
	wait_queue_head_t wait;		/* presumably destroy waits here for refcnt - confirm */
	u32 __user *user_rptr_addr;	/* userspace read-pointer location, if a user CQ */
};
110
111static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
112{
113 return container_of(ibcq, struct iwch_cq, ibcq);
114}
115
/* Bit flags for iwch_qp.flags. */
enum IWCH_QP_FLAGS {
	QP_QUIESCED = 0x01		/* see qp_quiesced() */
};
119
/* Negotiated MPA (marker PDU aligned framing) connection attributes. */
struct iwch_mpa_attributes {
	u8 recv_marker_enabled;	/* MPA markers expected on receive */
	u8 xmit_marker_enabled;	/* MPA markers inserted on transmit
				 * (original comment here appeared to be a
				 * copy-paste from another field) */
	u8 crc_enabled;		/* MPA CRC in use */
	u8 version;		/* MPA revision: 0 or 1 */
};
126
/*
 * Full set of modifiable/queryable QP attributes.  Passed to
 * iwch_modify_qp() along with an iwch_qp_attr_mask selecting which
 * members are valid.
 */
struct iwch_qp_attributes {
	u32 scq;			/* send CQ id - TODO confirm */
	u32 rcq;			/* receive CQ id - TODO confirm */
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;			/* current state (enum iwch_qp_state) */
	u8 enable_rdma_read;
	u8 enable_rdma_write;	/* enable inbound Read Resp. */
	u8 enable_bind;
	u8 enable_mmid0_fastreg;	/* Enable STAG0 + Fast-register */
	u32 max_ord;			/* outbound RDMA read depth */
	u32 max_ird;			/* inbound RDMA read depth */
	u32 pd;	/* IN */
	/*
	 * Next QP state.  If the current state is specified, only the
	 * QP attributes will be modified.  (Comment moved here from above
	 * max_ord, where it was misplaced.)
	 */
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct iwch_mpa_attributes mpa_attr;	/* IN-OUT */
	struct iwch_ep *llp_stream_handle;	/* LLP connection endpoint */
	char *stream_msg_buf;	/* Last stream msg. before Idle -> RTS */
	u32 stream_msg_buf_len;	/* Only on Idle -> RTS */
};
156
/* Queue pair: core verbs QP wrapping the low-level t3_wq. */
struct iwch_qp {
	struct ib_qp ibqp;		/* embedded core verbs QP (container_of target) */
	struct iwch_dev *rhp;		/* owning device */
	struct iwch_ep *ep;		/* associated connection endpoint */
	struct iwch_qp_attributes attr;	/* current attribute set */
	struct t3_wq wq;		/* low-level HW work queue */
	spinlock_t lock;
	atomic_t refcnt;		/* see iwch_qp_add_ref()/iwch_qp_rem_ref() */
	wait_queue_head_t wait;
	enum IWCH_QP_FLAGS flags;	/* e.g. QP_QUIESCED */
	struct timer_list timer;
};
169
170static inline int qp_quiesced(struct iwch_qp *qhp)
171{
172 return qhp->flags & QP_QUIESCED;
173}
174
175static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
176{
177 return container_of(ibqp, struct iwch_qp, ibqp);
178}
179
/* QP reference counting hooks and qpn -> ib_qp lookup; implemented elsewhere. */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
183
/* Per-open userspace context: low-level uctx plus pending mmap targets. */
struct iwch_ucontext {
	struct ib_ucontext ibucontext;	/* embedded core verbs ucontext */
	struct cxio_ucontext uctx;	/* low-level HW context */
	u32 key;			/* presumably next mmap key to hand out - confirm */
	spinlock_t mmap_lock;		/* protects 'mmaps' */
	struct list_head mmaps;		/* list of struct iwch_mm_entry */
};
191
192static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
193{
194 return container_of(c, struct iwch_ucontext, ibucontext);
195}
196
/* One pending mmap target, matched by (key, len) in remove_mmap(). */
struct iwch_mm_entry {
	struct list_head entry;	/* linked on iwch_ucontext.mmaps */
	u64 addr;		/* address to be mapped - TODO confirm semantics */
	u32 key;
	unsigned len;
};
203
204static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
205 u32 key, unsigned len)
206{
207 struct list_head *pos, *nxt;
208 struct iwch_mm_entry *mm;
209
210 spin_lock(&ucontext->mmap_lock);
211 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
212
213 mm = list_entry(pos, struct iwch_mm_entry, entry);
214 if (mm->key == key && mm->len == len) {
215 list_del_init(&mm->entry);
216 spin_unlock(&ucontext->mmap_lock);
217 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
218 key, (unsigned long long) mm->addr, mm->len);
219 return mm;
220 }
221 }
222 spin_unlock(&ucontext->mmap_lock);
223 return NULL;
224}
225
226static inline void insert_mmap(struct iwch_ucontext *ucontext,
227 struct iwch_mm_entry *mm)
228{
229 spin_lock(&ucontext->mmap_lock);
230 PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
231 mm->key, (unsigned long long) mm->addr, mm->len);
232 list_add_tail(&mm->entry, &ucontext->mmaps);
233 spin_unlock(&ucontext->mmap_lock);
234}
235
/*
 * Bit mask telling iwch_modify_qp() which members of
 * struct iwch_qp_attributes are valid.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* union of every attribute a caller may legally modify */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
256
/*
 * Modify @qhp using the @attrs members selected by @mask.
 * @internal distinguishes driver-internal transitions from user-initiated
 * ones - TODO confirm exact semantics in the implementation.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
262
/* Driver-internal QP states (see iwch_convert_state() for the IB mapping). */
enum iwch_qp_state {
	IWCH_QP_STATE_IDLE,
	IWCH_QP_STATE_RTS,
	IWCH_QP_STATE_ERROR,
	IWCH_QP_STATE_TERMINATE,
	IWCH_QP_STATE_CLOSING,
	IWCH_QP_STATE_TOT		/* number of states, not a real state */
};
271
272static inline int iwch_convert_state(enum ib_qp_state ib_state)
273{
274 switch (ib_state) {
275 case IB_QPS_RESET:
276 case IB_QPS_INIT:
277 return IWCH_QP_STATE_IDLE;
278 case IB_QPS_RTS:
279 return IWCH_QP_STATE_RTS;
280 case IB_QPS_SQD:
281 return IWCH_QP_STATE_CLOSING;
282 case IB_QPS_SQE:
283 return IWCH_QP_STATE_TERMINATE;
284 case IB_QPS_ERR:
285 return IWCH_QP_STATE_ERROR;
286 default:
287 return -1;
288 }
289}
290
/*
 * Memory access permission bits.  Packed so the compiler may use the
 * smallest integer type able to hold the values.
 */
enum iwch_mem_perms {
	IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
	IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
	IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
	IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
	IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
	IWCH_MEM_ACCESS_BINDING = 1 << 5,
	/* convenience unions of the local/remote bits */
	IWCH_MEM_ACCESS_LOCAL =
	    (IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
	IWCH_MEM_ACCESS_REMOTE =
	    (IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
	/* cannot go beyond 1 << 31 */
} __attribute__ ((packed));
304
305static inline u32 iwch_convert_access(int acc)
306{
307 return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0)
308 | (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) |
309 (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) |
310 (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) |
311 IWCH_MEM_ACCESS_LOCAL_READ;
312}
313
/* Validity state of a memory-map id / STag. */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
318
/* Flags controlling how much QP state a query returns / alters. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/*
	 * Test special.  NOTE(review): 0x32 is not a single bit like its
	 * siblings - possibly 0x20 was intended; confirm before relying
	 * on bitwise tests.
	 */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32
};
332
/* Verbs data-path entry points (post, bind, poll). */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Send a TERMINATE message in response to @rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Device registration with the IB core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
/* Quiesce/resume every QP bound to the given CQ. */
int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Memory registration helpers; @page_list entries are big-endian DMA addresses. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					__be64 *page_list);
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					__be64 *page_list,
					int npages);
/* Build a page list from physical buffers; outputs sizes/shift/npages. */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list);
363
364
/* Human-readable node description string for this device. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
366
367#endif