drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_VERBS_H__
#define __PVRDMA_VERBS_H__

#include <linux/types.h>

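/*
 * The definitions below mirror their ib_verbs counterparts (ib_gid, ib_mtu,
 * ib_port_attr, ...). Presumably the driver keeps separate PVRDMA copies so
 * the numeric values stay pinned to the ABI shared with the paravirtual
 * device, independent of changes to the kernel-internal IB enums.
 */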
union pvrdma_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

enum pvrdma_link_layer {
        PVRDMA_LINK_LAYER_UNSPECIFIED,
        PVRDMA_LINK_LAYER_INFINIBAND,
        PVRDMA_LINK_LAYER_ETHERNET,
};

enum pvrdma_mtu {
        PVRDMA_MTU_256  = 1,
        PVRDMA_MTU_512  = 2,
        PVRDMA_MTU_1024 = 3,
        PVRDMA_MTU_2048 = 4,
        PVRDMA_MTU_4096 = 5,
};

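/*
 * Helpers for translating between the IBTA path-MTU encoding above and plain
 * byte counts. Note that pvrdma_mtu_int_to_enum() falls back to
 * PVRDMA_MTU_4096 for any value it does not recognize.
 */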
static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
{
        switch (mtu) {
        case PVRDMA_MTU_256:  return 256;
        case PVRDMA_MTU_512:  return 512;
        case PVRDMA_MTU_1024: return 1024;
        case PVRDMA_MTU_2048: return 2048;
        case PVRDMA_MTU_4096: return 4096;
        default:              return -1;
        }
}

static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
{
        switch (mtu) {
        case 256:  return PVRDMA_MTU_256;
        case 512:  return PVRDMA_MTU_512;
        case 1024: return PVRDMA_MTU_1024;
        case 2048: return PVRDMA_MTU_2048;
        case 4096:
        default:   return PVRDMA_MTU_4096;
        }
}

enum pvrdma_port_state {
        PVRDMA_PORT_NOP = 0,
        PVRDMA_PORT_DOWN = 1,
        PVRDMA_PORT_INIT = 2,
        PVRDMA_PORT_ARMED = 3,
        PVRDMA_PORT_ACTIVE = 4,
        PVRDMA_PORT_ACTIVE_DEFER = 5,
};

enum pvrdma_port_cap_flags {
        PVRDMA_PORT_SM = 1 << 1,
        PVRDMA_PORT_NOTICE_SUP = 1 << 2,
        PVRDMA_PORT_TRAP_SUP = 1 << 3,
        PVRDMA_PORT_OPT_IPD_SUP = 1 << 4,
        PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5,
        PVRDMA_PORT_SL_MAP_SUP = 1 << 6,
        PVRDMA_PORT_MKEY_NVRAM = 1 << 7,
        PVRDMA_PORT_PKEY_NVRAM = 1 << 8,
        PVRDMA_PORT_LED_INFO_SUP = 1 << 9,
        PVRDMA_PORT_SM_DISABLED = 1 << 10,
        PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
        PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
        PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
        PVRDMA_PORT_CM_SUP = 1 << 16,
        PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17,
        PVRDMA_PORT_REINIT_SUP = 1 << 18,
        PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19,
        PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20,
        PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21,
        PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
        PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23,
        PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24,
        PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25,
        PVRDMA_PORT_IP_BASED_GIDS = 1 << 26,
        PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS,
};

enum pvrdma_port_width {
        PVRDMA_WIDTH_1X = 1,
        PVRDMA_WIDTH_4X = 2,
        PVRDMA_WIDTH_8X = 4,
        PVRDMA_WIDTH_12X = 8,
};

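/* Convert the link-width encoding above into a lane count (1x/4x/8x/12x). */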
static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
{
        switch (width) {
        case PVRDMA_WIDTH_1X:  return 1;
        case PVRDMA_WIDTH_4X:  return 4;
        case PVRDMA_WIDTH_8X:  return 8;
        case PVRDMA_WIDTH_12X: return 12;
        default:               return -1;
        }
}

enum pvrdma_port_speed {
        PVRDMA_SPEED_SDR = 1,
        PVRDMA_SPEED_DDR = 2,
        PVRDMA_SPEED_QDR = 4,
        PVRDMA_SPEED_FDR10 = 8,
        PVRDMA_SPEED_FDR = 16,
        PVRDMA_SPEED_EDR = 32,
};

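/*
 * Port attributes, paralleling struct ib_port_attr; the trailing reserved
 * bytes keep the layout explicitly padded.
 */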
struct pvrdma_port_attr {
        enum pvrdma_port_state  state;
        enum pvrdma_mtu         max_mtu;
        enum pvrdma_mtu         active_mtu;
        u32                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        u8                      reserved[2];
};

struct pvrdma_global_route {
        union pvrdma_gid        dgid;
        u32                     flow_label;
        u8                      sgid_index;
        u8                      hop_limit;
        u8                      traffic_class;
        u8                      reserved;
};

struct pvrdma_grh {
        __be32                  version_tclass_flow;
        __be16                  paylen;
        u8                      next_hdr;
        u8                      hop_limit;
        union pvrdma_gid        sgid;
        union pvrdma_gid        dgid;
};

enum pvrdma_ah_flags {
        PVRDMA_AH_GRH = 1,
};

enum pvrdma_rate {
        PVRDMA_RATE_PORT_CURRENT = 0,
        PVRDMA_RATE_2_5_GBPS = 2,
        PVRDMA_RATE_5_GBPS = 5,
        PVRDMA_RATE_10_GBPS = 3,
        PVRDMA_RATE_20_GBPS = 6,
        PVRDMA_RATE_30_GBPS = 4,
        PVRDMA_RATE_40_GBPS = 7,
        PVRDMA_RATE_60_GBPS = 8,
        PVRDMA_RATE_80_GBPS = 9,
        PVRDMA_RATE_120_GBPS = 10,
        PVRDMA_RATE_14_GBPS = 11,
        PVRDMA_RATE_56_GBPS = 12,
        PVRDMA_RATE_112_GBPS = 13,
        PVRDMA_RATE_168_GBPS = 14,
        PVRDMA_RATE_25_GBPS = 15,
        PVRDMA_RATE_100_GBPS = 16,
        PVRDMA_RATE_200_GBPS = 17,
        PVRDMA_RATE_300_GBPS = 18,
};

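/*
 * Address handle attributes. The device exposes a RoCE link layer, so dmac
 * carries the destination MAC address resolved for the remote GID.
 */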
struct pvrdma_ah_attr {
        struct pvrdma_global_route      grh;
        u16                             dlid;
        u16                             vlan_id;
        u8                              sl;
        u8                              src_path_bits;
        u8                              static_rate;
        u8                              ah_flags;
        u8                              port_num;
        u8                              dmac[6];
        u8                              reserved;
};

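/* CQ notification flags; counterparts of enum ib_cq_notify_flags. */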
enum pvrdma_cq_notify_flags {
        PVRDMA_CQ_SOLICITED = 1 << 0,
        PVRDMA_CQ_NEXT_COMP = 1 << 1,
        PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED |
                                   PVRDMA_CQ_NEXT_COMP,
        PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

struct pvrdma_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
        u32     reserved;
};

enum pvrdma_sig_type {
        PVRDMA_SIGNAL_ALL_WR,
        PVRDMA_SIGNAL_REQ_WR,
};

enum pvrdma_qp_type {
        PVRDMA_QPT_SMI,
        PVRDMA_QPT_GSI,
        PVRDMA_QPT_RC,
        PVRDMA_QPT_UC,
        PVRDMA_QPT_UD,
        PVRDMA_QPT_RAW_IPV6,
        PVRDMA_QPT_RAW_ETHERTYPE,
        PVRDMA_QPT_RAW_PACKET = 8,
        PVRDMA_QPT_XRC_INI = 9,
        PVRDMA_QPT_XRC_TGT,
        PVRDMA_QPT_MAX,
};

enum pvrdma_qp_create_flags {
        PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0,
        PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};

enum pvrdma_qp_attr_mask {
        PVRDMA_QP_STATE = 1 << 0,
        PVRDMA_QP_CUR_STATE = 1 << 1,
        PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
        PVRDMA_QP_ACCESS_FLAGS = 1 << 3,
        PVRDMA_QP_PKEY_INDEX = 1 << 4,
        PVRDMA_QP_PORT = 1 << 5,
        PVRDMA_QP_QKEY = 1 << 6,
        PVRDMA_QP_AV = 1 << 7,
        PVRDMA_QP_PATH_MTU = 1 << 8,
        PVRDMA_QP_TIMEOUT = 1 << 9,
        PVRDMA_QP_RETRY_CNT = 1 << 10,
        PVRDMA_QP_RNR_RETRY = 1 << 11,
        PVRDMA_QP_RQ_PSN = 1 << 12,
        PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
        PVRDMA_QP_ALT_PATH = 1 << 14,
        PVRDMA_QP_MIN_RNR_TIMER = 1 << 15,
        PVRDMA_QP_SQ_PSN = 1 << 16,
        PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
        PVRDMA_QP_PATH_MIG_STATE = 1 << 18,
        PVRDMA_QP_CAP = 1 << 19,
        PVRDMA_QP_DEST_QPN = 1 << 20,
        PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN,
};

enum pvrdma_qp_state {
        PVRDMA_QPS_RESET,
        PVRDMA_QPS_INIT,
        PVRDMA_QPS_RTR,
        PVRDMA_QPS_RTS,
        PVRDMA_QPS_SQD,
        PVRDMA_QPS_SQE,
        PVRDMA_QPS_ERR,
};

enum pvrdma_mig_state {
        PVRDMA_MIG_MIGRATED,
        PVRDMA_MIG_REARM,
        PVRDMA_MIG_ARMED,
};

enum pvrdma_mw_type {
        PVRDMA_MW_TYPE_1 = 1,
        PVRDMA_MW_TYPE_2 = 2,
};

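/*
 * QP attributes for modify/query QP, field-for-field analogous to
 * struct ib_qp_attr with pvrdma_* sub-types and explicit reserved padding.
 */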
struct pvrdma_qp_attr {
        enum pvrdma_qp_state    qp_state;
        enum pvrdma_qp_state    cur_qp_state;
        enum pvrdma_mtu         path_mtu;
        enum pvrdma_mig_state   path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        u32                     qp_access_flags;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u8                      reserved[5];
        struct pvrdma_qp_cap    cap;
        struct pvrdma_ah_attr   ah_attr;
        struct pvrdma_ah_attr   alt_ah_attr;
};

enum pvrdma_send_flags {
        PVRDMA_SEND_FENCE = 1 << 0,
        PVRDMA_SEND_SIGNALED = 1 << 1,
        PVRDMA_SEND_SOLICITED = 1 << 2,
        PVRDMA_SEND_INLINE = 1 << 3,
        PVRDMA_SEND_IP_CSUM = 1 << 4,
        PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM,
};

enum pvrdma_access_flags {
        PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0,
        PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1,
        PVRDMA_ACCESS_REMOTE_READ = 1 << 2,
        PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3,
        PVRDMA_ACCESS_MW_BIND = 1 << 4,
        PVRDMA_ZERO_BASED = 1 << 5,
        PVRDMA_ACCESS_ON_DEMAND = 1 << 6,
        PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND,
};

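/*
 * Verbs entry points implemented by this driver; they are registered as the
 * ib_device callbacks when the device is set up (see pvrdma_main.c).
 */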
int pvrdma_query_device(struct ib_device *ibdev,
                        struct ib_device_attr *props,
                        struct ib_udata *udata);
int pvrdma_query_port(struct ib_device *ibdev, u8 port,
                      struct ib_port_attr *props);
int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
                     int index, union ib_gid *gid);
int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
                      u16 index, u16 *pkey);
enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
                                            u8 port);
int pvrdma_modify_device(struct ib_device *ibdev, int mask,
                         struct ib_device_modify *props);
int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
                       int mask, struct ib_port_modify *props);
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
                                          struct ib_udata *udata);
int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);
int pvrdma_dealloc_pd(struct ib_pd *ibpd);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                 u64 virt_addr, int access_flags,
                                 struct ib_udata *udata);
int pvrdma_dereg_mr(struct ib_mr *mr);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                              u32 max_num_sg);
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                     int sg_nents, unsigned int *sg_offset);
int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
                     struct ib_udata *udata);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *context,
                               struct ib_udata *udata);
int pvrdma_destroy_cq(struct ib_cq *cq);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                               struct ib_udata *udata);
int pvrdma_destroy_ah(struct ib_ah *ah);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                               struct ib_qp_init_attr *init_attr,
                               struct ib_udata *udata);
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata);
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                    int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int pvrdma_destroy_qp(struct ib_qp *qp);
int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     struct ib_send_wr **bad_wr);
int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                     struct ib_recv_wr **bad_wr);

#endif /* __PVRDMA_VERBS_H__ */