/*
 * drivers/infiniband/hw/mlx5/user.h
 * (recovered from a gitweb blame capture; navigation residue removed.
 *  Blame commit subject: "IB/mlx5: Enable flow steering for IPv6 traffic")
 */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_USER_H
#define MLX5_IB_USER_H

#include <linux/types.h>

/* For struct mlx5_ib_ucontext, field_avail(), verify_assign_uidx(), etc. */
#include "mlx5_ib.h"

/* QP flags set by userspace (see mlx5_ib_create_qp.flags). */
enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
};

/* SRQ flags set by userspace (see mlx5_ib_create_srq.flags). */
enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};

/* WQ flags set by userspace (see mlx5_ib_create_wq.flags). */
enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};


/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */

/* Userspace request for allocating a device ucontext (ABI v1 layout). */
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_uuars;	/* total number of uuars requested */
	__u32	num_low_latency_uuars;	/* of those, how many low-latency */
};

/*
 * Extended (v2) ucontext allocation request. Begins with the same two
 * fields as the v1 struct so older layouts remain a prefix of this one.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_uuars;	/* total number of uuars requested */
	__u32	num_low_latency_uuars;	/* of those, how many low-latency */
	__u32	flags;
	__u32	comp_mask;		/* which optional fields below are valid */
	__u8	max_cqe_version;	/* highest CQE version userspace supports — presumably; confirm against resp.cqe_version usage */
	__u8	reserved0;		/* explicit padding */
	__u16	reserved1;
	__u32	reserved2;
};

/* Bits for mlx5_ib_alloc_ucontext_resp.comp_mask. */
enum mlx5_ib_alloc_ucontext_resp_mask {
	/* When set, hca_core_clock_offset in the response is valid. */
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};

/*
 * Kernel response to ucontext allocation: device limits and per-context
 * resources reported back to userspace.
 */
struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;		/* "bf" register size (BlueFlame? — confirm) */
	__u32	tot_uuars;		/* number of uuars actually granted */
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;		/* max send-queue descriptor size */
	__u16	max_rq_desc_sz;		/* max receive-queue descriptor size */
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	reserved1;		/* explicit padding */
	__u32	comp_mask;		/* enum mlx5_ib_alloc_ucontext_resp_mask */
	__u32	response_length;
	__u8	cqe_version;
	__u8	reserved2;		/* explicit padding */
	__u16	reserved3;
	__u64	hca_core_clock_offset;	/* valid only if CORE_CLOCK_OFFSET bit set in comp_mask */
};

/* Kernel response to PD allocation. */
struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;	/* protection domain number */
};

/* Userspace request to create a CQ. */
struct mlx5_ib_create_cq {
	__u64	buf_addr;	/* userspace CQ buffer address (pointer passed as __u64) */
	__u64	db_addr;	/* userspace doorbell record address */
	__u32	cqe_size;
	__u32	reserved;	/* explicit padding (optional on i386) */
};

/* Kernel response to CQ creation. */
struct mlx5_ib_create_cq_resp {
	__u32	cqn;		/* CQ number */
	__u32	reserved;	/* explicit padding */
};

/* Userspace request to resize a CQ. */
struct mlx5_ib_resize_cq {
	__u64	buf_addr;	/* new userspace CQ buffer address (pointer as __u64) */
	__u16	cqe_size;
	__u16	reserved0;	/* explicit padding */
	__u32	reserved1;
};

/* Userspace request to create an SRQ. */
struct mlx5_ib_create_srq {
	__u64	buf_addr;	/* userspace SRQ buffer address (pointer as __u64) */
	__u64	db_addr;	/* userspace doorbell record address */
	__u32	flags;		/* MLX5_SRQ_FLAG_* */
	__u32	reserved0;	/* explicit padding (optional on i386) */
	__u32	uidx;		/* user index; validated by get_srq_user_index() */
	__u32	reserved1;
};

/* Kernel response to SRQ creation. */
struct mlx5_ib_create_srq_resp {
	__u32	srqn;		/* SRQ number */
	__u32	reserved;	/* explicit padding */
};

/* Userspace request to create a QP. */
struct mlx5_ib_create_qp {
	__u64	buf_addr;	/* userspace WQ buffer address (pointer as __u64) */
	__u64	db_addr;	/* userspace doorbell record address */
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;		/* MLX5_QP_FLAG_* */
	__u32	uidx;		/* user index; validated by get_qp_user_index() */
	__u32	reserved0;	/* explicit padding */
	__u64	sq_buf_addr;	/* separate SQ buffer address — presumably used when the SQ is not in buf_addr; confirm in qp.c */
};

/* RX hash function selection flags (see mlx5_ib_create_qp_rss.rx_hash_function). */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX hash field flags: each flag selects a packet field that should
 * participate in the RX hash calculation; a field is included only when
 * its flag is set.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4		= 1 << 0,
	MLX5_RX_HASH_DST_IPV4		= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6		= 1 << 2,
	MLX5_RX_HASH_DST_IPV6		= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7
};

/* Userspace request to create an RSS QP. */
struct mlx5_ib_create_qp_rss {
	__u64	rx_hash_fields_mask;	/* enum mlx5_rx_hash_fields */
	__u8	rx_hash_function;	/* enum mlx5_rx_hash_function_flags */
	__u8	rx_key_len;		/* valid only for Toeplitz */
	__u8	reserved[6];		/* explicit padding */
	__u8	rx_hash_key[128];	/* valid only for Toeplitz */
	__u32	comp_mask;
	__u32	reserved1;
};

/* Kernel response to QP creation. */
struct mlx5_ib_create_qp_resp {
	__u32	uuar_index;	/* uuar assigned to this QP */
};

/* Userspace request to allocate a memory window (MW). */
struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;	/* number of klm entries — presumably; confirm in mr.c */
	__u8	reserved1;	/* explicit padding */
	__u16	reserved2;
};

/* Userspace request to create a WQ. */
struct mlx5_ib_create_wq {
	__u64	buf_addr;	/* userspace WQ buffer address (pointer as __u64) */
	__u64	db_addr;	/* userspace doorbell record address */
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	user_index;
	__u32	flags;		/* MLX5_WQ_FLAG_* */
	__u32	comp_mask;
	__u32	reserved;	/* explicit padding */
};

/* Kernel response to WQ creation. */
struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;	/* explicit padding */
};

/* Kernel response to RQ indirection-table creation. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;	/* explicit padding */
};

/* Userspace request to modify a WQ. */
struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;	/* explicit padding */
};

226static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
227 struct mlx5_ib_create_qp *ucmd,
228 int inlen,
229 u32 *user_index)
230{
231 u8 cqe_version = ucontext->cqe_version;
232
233 if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
234 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
235 return 0;
236
237 if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
238 !!cqe_version))
239 return -EINVAL;
240
241 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
242}
243
244static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
245 struct mlx5_ib_create_srq *ucmd,
246 int inlen,
247 u32 *user_index)
248{
249 u8 cqe_version = ucontext->cqe_version;
250
251 if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
252 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
253 return 0;
254
255 if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
256 !!cqe_version))
257 return -EINVAL;
258
259 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
260}
e126ba97 261#endif /* MLX5_IB_USER_H */