/* drivers/net/ethernet/mellanox/mlx5/core/vport.c */

/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

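/**
 * mlx5_query_vport_state - read the state of a vport
 * @mdev:  mlx5 core device handle
 * @opmod: opcode modifier selecting the kind of vport to query
 *         (e.g. MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT)
 *
 * Issues the QUERY_VPORT_STATE command and returns its "state" output
 * field. Note that a command failure is only logged via
 * mlx5_core_warn(); the state field of the output buffer is returned
 * either way.
 */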
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
{
        u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
        u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
        int err;

        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_state_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_STATE);
        MLX5_SET(query_vport_state_in, in, op_mod, opmod);

        err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
                                         sizeof(out));
        if (err)
                mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

        return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL(mlx5_query_vport_state);

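/*
 * Thin wrappers around the QUERY_NIC_VPORT_CONTEXT and
 * MODIFY_NIC_VPORT_CONTEXT commands: they stamp the opcode into the
 * caller-provided mailbox and execute it, leaving all other fields
 * (e.g. the field_select bits on the modify path) to the caller.
 */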
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 *out,
                                        int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];

        memset(in, 0, sizeof(in));

        MLX5_SET(query_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

        return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
                                         int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];

        MLX5_SET(modify_nic_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

        return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

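/**
 * mlx5_query_nic_vport_mac_address - read the permanent MAC of the NIC vport
 * @mdev: mlx5 core device handle
 * @addr: buffer of at least ETH_ALEN bytes to copy the address into
 *
 * On query or allocation failure @addr is left untouched; no error is
 * reported to the caller.
 */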
void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
        u8 *out_addr;
        int err;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
                                nic_vport_context.permanent_address);

        err = mlx5_query_nic_vport_context(mdev, out, outlen);
        if (!err)
                ether_addr_copy(addr, &out_addr[2]);

        kvfree(out);
}
EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);

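/**
 * mlx5_query_nic_vport_system_image_guid - read the system image GUID
 * @mdev:              mlx5 core device handle
 * @system_image_guid: filled with the GUID from the NIC vport context
 *
 * Returns 0 on success or -ENOMEM if the output mailbox cannot be
 * allocated; a failure of the query itself is not propagated.
 */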
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, out, outlen);

        *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                        nic_vport_context.system_image_guid);

        /* mlx5_vzalloc() may fall back to vmalloc, so free with kvfree() */
        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

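/**
 * mlx5_query_nic_vport_node_guid - read the node GUID of the NIC vport
 * @mdev:      mlx5 core device handle
 * @node_guid: filled with the GUID from the NIC vport context
 *
 * Returns 0 on success or -ENOMEM on mailbox allocation failure.
 */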
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, out, outlen);

        *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
                                nic_vport_context.node_guid);

        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

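/**
 * mlx5_query_nic_vport_qkey_viol_cntr - read the Q_Key violation counter
 * @mdev:           mlx5 core device handle
 * @qkey_viol_cntr: filled with the counter from the NIC vport context
 *
 * Returns 0 on success or -ENOMEM on mailbox allocation failure.
 */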
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
{
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        mlx5_query_nic_vport_context(mdev, out, outlen);

        *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
                                   nic_vport_context.qkey_violation_counter);

        kvfree(out);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

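/**
 * mlx5_query_hca_vport_gid - read a GID from an HCA vport's GID table
 * @dev:         mlx5 core device handle
 * @other_vport: nonzero to query a vport other than our own
 * @port_num:    physical port (only set when the device has two ports)
 * @vf_num:      vport number, used when @other_vport is set
 * @gid_index:   index into the GID table, or 0xffff for the whole table
 * @gid:         receives the returned GID (only the first entry when the
 *               whole table is requested)
 *
 * Querying another vport requires the vport_group_manager capability,
 * otherwise -EPERM is returned. Returns 0 on success or a negative
 * errno on failure.
 */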
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
                             u8 port_num, u16 vf_num, u16 gid_index,
                             union ib_gid *gid)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        union ib_gid *tmp;
        int tbsz;
        int nout;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
        tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
        mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
                      vf_num, gid_index, tbsz);

        if (gid_index > tbsz && gid_index != 0xffff)
                return -EINVAL;

        if (gid_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * sizeof(*gid);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
        gid->global.subnet_prefix = tmp->global.subnet_prefix;
        gid->global.interface_id = tmp->global.interface_id;

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

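/**
 * mlx5_query_hca_vport_pkey - read P_Keys from an HCA vport's P_Key table
 * @dev:         mlx5 core device handle
 * @other_vport: nonzero to query a vport other than our own
 * @port_num:    physical port (only set when the device has two ports)
 * @vf_num:      vport number, used when @other_vport is set
 * @pkey_index:  index into the P_Key table, or 0xffff for the whole table
 * @pkey:        output array; receives one P_Key, or the whole table when
 *               @pkey_index is 0xffff
 *
 * As with the GID query, reading another vport's table requires the
 * vport_group_manager capability. Returns 0 on success or a negative
 * errno on failure.
 */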
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
                              u8 port_num, u16 vf_num, u16 pkey_index,
                              u16 *pkey)
{
        int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
        int is_group_manager;
        void *out = NULL;
        void *in = NULL;
        void *pkarr;
        int nout;
        int tbsz;
        int err;
        int i;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
        if (pkey_index > tbsz && pkey_index != 0xffff)
                return -EINVAL;

        if (pkey_index == 0xffff)
                nout = tbsz;
        else
                nout = 1;

        out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
                        MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
                } else {
                        err = -EPERM;
                        goto out;
                }
        }
        MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (err)
                goto out;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto out;

        pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
        for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
                *pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
        kfree(in);
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

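/**
 * mlx5_query_hca_vport_context - read an HCA vport context into @rep
 * @dev:         mlx5 core device handle
 * @other_vport: nonzero to query a vport other than our own
 * @port_num:    physical port (only set when the device has two ports)
 * @vf_num:      vport number, used when @other_vport is set
 * @rep:         software representation of the vport context to fill
 *
 * Returns 0 on success or a negative errno on failure; querying
 * another vport without the vport_group_manager capability fails with
 * -EPERM.
 */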
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
                                 u8 other_vport, u8 port_num,
                                 u16 vf_num,
                                 struct mlx5_hca_vport_context *rep)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
        int is_group_manager;
        void *out;
        void *ctx;
        int err;

        is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

        if (other_vport) {
                if (is_group_manager) {
                        MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
                        MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
                } else {
                        err = -EPERM;
                        goto ex;
                }
        }

        if (MLX5_CAP_GEN(dev, num_ports) == 2)
                MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto ex;
        err = mlx5_cmd_status_to_err_v2(out);
        if (err)
                goto ex;

        ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
        rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
        rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
        rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
        rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
        rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
        rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
                                      port_physical_state);
        rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
        rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
                                               port_physical_state);
        rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
        rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
        rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
        rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask1_field_select);
        rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
        rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
                                          cap_mask2_field_select);
        rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
        rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
                                           init_type_reply);
        rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
        rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
                                          subnet_timeout);
        rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
        rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
        rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  qkey_violation_counter);
        rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
                                                  pkey_violation_counter);
        rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
        rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
                                            system_image_guid);

ex:
        kfree(out);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

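/**
 * mlx5_query_hca_vport_system_image_guid - read our HCA's system image GUID
 * @dev:            mlx5 core device handle
 * @sys_image_guid: filled with the GUID from the vport context of port 1
 *
 * Convenience wrapper around mlx5_query_hca_vport_context() for the
 * local vport. Returns 0 on success or a negative errno on failure.
 */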
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
                                           u64 *sys_image_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *sys_image_guid = rep->sys_image_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

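/**
 * mlx5_query_hca_vport_node_guid - read our HCA's node GUID
 * @dev:       mlx5 core device handle
 * @node_guid: filled with the GUID from the vport context of port 1
 *
 * Returns 0 on success or a negative errno on failure.
 */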
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
                                   u64 *node_guid)
{
        struct mlx5_hca_vport_context *rep;
        int err;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep)
                return -ENOMEM;

        err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
        if (!err)
                *node_guid = rep->node_guid;

        kfree(rep);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

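/*
 * RoCE on a NIC vport is toggled through the roce_en bit of the NIC
 * vport context; the field_select.roce_en bit tells firmware that this
 * is the only field being modified.
 */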
enum mlx5_vport_roce_state {
        MLX5_VPORT_ROCE_DISABLED = 0,
        MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
                                            enum mlx5_vport_roce_state state)
{
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(mdev, "failed to allocate inbox\n");
                return -ENOMEM;
        }

        MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
        MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
                 state);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);

        return err;
}

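/**
 * mlx5_nic_vport_enable_roce - enable RoCE on the NIC vport
 * @mdev: mlx5 core device handle
 *
 * Returns 0 on success or a negative errno on failure. Paired with
 * mlx5_nic_vport_disable_roce() below.
 */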
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
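
/*
 * Usage sketch (illustrative only, not part of this file): an RDMA ULP
 * built on mlx5_core would typically bracket the lifetime of its RoCE
 * resources with these calls. "ibdev->mdev" is a hypothetical private
 * handle, not something defined here.
 *
 *      err = mlx5_nic_vport_enable_roce(ibdev->mdev);
 *      if (err)
 *              return err;
 *      ...use RoCE...
 *      mlx5_nic_vport_disable_roce(ibdev->mdev);
 */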