/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
39 u8
mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
)
41 u32 in
[MLX5_ST_SZ_DW(query_vport_state_in
)];
42 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)];
45 memset(in
, 0, sizeof(in
));
47 MLX5_SET(query_vport_state_in
, in
, opcode
,
48 MLX5_CMD_OP_QUERY_VPORT_STATE
);
49 MLX5_SET(query_vport_state_in
, in
, op_mod
, opmod
);
51 err
= mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
,
54 mlx5_core_warn(mdev
, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
56 return MLX5_GET(query_vport_state_out
, out
, state
);
58 EXPORT_SYMBOL(mlx5_query_vport_state
);
60 static int mlx5_query_nic_vport_context(struct mlx5_core_dev
*mdev
, u32
*out
,
63 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
65 memset(in
, 0, sizeof(in
));
67 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
68 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
70 return mlx5_cmd_exec_check_status(mdev
, in
, sizeof(in
), out
, outlen
);
73 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev
*mdev
, void *in
,
76 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
78 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
79 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
81 return mlx5_cmd_exec_check_status(mdev
, in
, inlen
, out
, sizeof(out
));
84 void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev
*mdev
, u8
*addr
)
87 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
91 out
= mlx5_vzalloc(outlen
);
95 out_addr
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
96 nic_vport_context
.permanent_address
);
98 err
= mlx5_query_nic_vport_context(mdev
, out
, outlen
);
100 ether_addr_copy(addr
, &out_addr
[2]);
104 EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address
);
106 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev
*mdev
,
107 u64
*system_image_guid
)
110 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
112 out
= mlx5_vzalloc(outlen
);
116 mlx5_query_nic_vport_context(mdev
, out
, outlen
);
118 *system_image_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
119 nic_vport_context
.system_image_guid
);
125 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid
);
127 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev
*mdev
, u64
*node_guid
)
130 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
132 out
= mlx5_vzalloc(outlen
);
136 mlx5_query_nic_vport_context(mdev
, out
, outlen
);
138 *node_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
139 nic_vport_context
.node_guid
);
145 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid
);
147 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev
*mdev
,
151 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
153 out
= mlx5_vzalloc(outlen
);
157 mlx5_query_nic_vport_context(mdev
, out
, outlen
);
159 *qkey_viol_cntr
= MLX5_GET(query_nic_vport_context_out
, out
,
160 nic_vport_context
.qkey_violation_counter
);
166 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr
);
168 int mlx5_query_hca_vport_gid(struct mlx5_core_dev
*dev
, u8 other_vport
,
169 u8 port_num
, u16 vf_num
, u16 gid_index
,
172 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_in
);
173 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
174 int is_group_manager
;
182 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
183 tbsz
= mlx5_get_gid_table_len(MLX5_CAP_GEN(dev
, gid_table_size
));
184 mlx5_core_dbg(dev
, "vf_num %d, index %d, gid_table_size %d\n",
185 vf_num
, gid_index
, tbsz
);
187 if (gid_index
> tbsz
&& gid_index
!= 0xffff)
190 if (gid_index
== 0xffff)
195 out_sz
+= nout
* sizeof(*gid
);
197 in
= kzalloc(in_sz
, GFP_KERNEL
);
198 out
= kzalloc(out_sz
, GFP_KERNEL
);
204 MLX5_SET(query_hca_vport_gid_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_GID
);
206 if (is_group_manager
) {
207 MLX5_SET(query_hca_vport_gid_in
, in
, vport_number
, vf_num
);
208 MLX5_SET(query_hca_vport_gid_in
, in
, other_vport
, 1);
214 MLX5_SET(query_hca_vport_gid_in
, in
, gid_index
, gid_index
);
216 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
217 MLX5_SET(query_hca_vport_gid_in
, in
, port_num
, port_num
);
219 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
223 err
= mlx5_cmd_status_to_err_v2(out
);
227 tmp
= out
+ MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
228 gid
->global
.subnet_prefix
= tmp
->global
.subnet_prefix
;
229 gid
->global
.interface_id
= tmp
->global
.interface_id
;
236 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid
);
238 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev
*dev
, u8 other_vport
,
239 u8 port_num
, u16 vf_num
, u16 pkey_index
,
242 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in
);
243 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out
);
244 int is_group_manager
;
253 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
255 tbsz
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev
, pkey_table_size
));
256 if (pkey_index
> tbsz
&& pkey_index
!= 0xffff)
259 if (pkey_index
== 0xffff)
264 out_sz
+= nout
* MLX5_ST_SZ_BYTES(pkey
);
266 in
= kzalloc(in_sz
, GFP_KERNEL
);
267 out
= kzalloc(out_sz
, GFP_KERNEL
);
273 MLX5_SET(query_hca_vport_pkey_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
);
275 if (is_group_manager
) {
276 MLX5_SET(query_hca_vport_pkey_in
, in
, vport_number
, vf_num
);
277 MLX5_SET(query_hca_vport_pkey_in
, in
, other_vport
, 1);
283 MLX5_SET(query_hca_vport_pkey_in
, in
, pkey_index
, pkey_index
);
285 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
286 MLX5_SET(query_hca_vport_pkey_in
, in
, port_num
, port_num
);
288 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
292 err
= mlx5_cmd_status_to_err_v2(out
);
296 pkarr
= MLX5_ADDR_OF(query_hca_vport_pkey_out
, out
, pkey
);
297 for (i
= 0; i
< nout
; i
++, pkey
++, pkarr
+= MLX5_ST_SZ_BYTES(pkey
))
298 *pkey
= MLX5_GET_PR(pkey
, pkarr
, pkey
);
305 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey
);
307 int mlx5_query_hca_vport_context(struct mlx5_core_dev
*dev
,
308 u8 other_vport
, u8 port_num
,
310 struct mlx5_hca_vport_context
*rep
)
312 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_context_out
);
313 int in
[MLX5_ST_SZ_DW(query_hca_vport_context_in
)];
314 int is_group_manager
;
319 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
321 memset(in
, 0, sizeof(in
));
322 out
= kzalloc(out_sz
, GFP_KERNEL
);
326 MLX5_SET(query_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
);
329 if (is_group_manager
) {
330 MLX5_SET(query_hca_vport_context_in
, in
, other_vport
, 1);
331 MLX5_SET(query_hca_vport_context_in
, in
, vport_number
, vf_num
);
338 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
339 MLX5_SET(query_hca_vport_context_in
, in
, port_num
, port_num
);
341 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
344 err
= mlx5_cmd_status_to_err_v2(out
);
348 ctx
= MLX5_ADDR_OF(query_hca_vport_context_out
, out
, hca_vport_context
);
349 rep
->field_select
= MLX5_GET_PR(hca_vport_context
, ctx
, field_select
);
350 rep
->sm_virt_aware
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_virt_aware
);
351 rep
->has_smi
= MLX5_GET_PR(hca_vport_context
, ctx
, has_smi
);
352 rep
->has_raw
= MLX5_GET_PR(hca_vport_context
, ctx
, has_raw
);
353 rep
->policy
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state_policy
);
354 rep
->phys_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
355 port_physical_state
);
356 rep
->vport_state
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state
);
357 rep
->port_physical_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
358 port_physical_state
);
359 rep
->port_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, port_guid
);
360 rep
->node_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, node_guid
);
361 rep
->cap_mask1
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask1
);
362 rep
->cap_mask1_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
363 cap_mask1_field_select
);
364 rep
->cap_mask2
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask2
);
365 rep
->cap_mask2_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
366 cap_mask2_field_select
);
367 rep
->lid
= MLX5_GET_PR(hca_vport_context
, ctx
, lid
);
368 rep
->init_type_reply
= MLX5_GET_PR(hca_vport_context
, ctx
,
370 rep
->lmc
= MLX5_GET_PR(hca_vport_context
, ctx
, lmc
);
371 rep
->subnet_timeout
= MLX5_GET_PR(hca_vport_context
, ctx
,
373 rep
->sm_lid
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_lid
);
374 rep
->sm_sl
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_sl
);
375 rep
->qkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
376 qkey_violation_counter
);
377 rep
->pkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
378 pkey_violation_counter
);
379 rep
->grh_required
= MLX5_GET_PR(hca_vport_context
, ctx
, grh_required
);
380 rep
->sys_image_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
,
387 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context
);
389 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev
*dev
,
392 struct mlx5_hca_vport_context
*rep
;
395 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
399 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
401 *sys_image_guid
= rep
->sys_image_guid
;
406 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid
);
408 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev
*dev
,
411 struct mlx5_hca_vport_context
*rep
;
414 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
418 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
420 *node_guid
= rep
->node_guid
;
425 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid
);
/* Values written to nic_vport_context.roce_en by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
432 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev
*mdev
,
433 enum mlx5_vport_roce_state state
)
436 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
439 in
= mlx5_vzalloc(inlen
);
441 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
445 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.roce_en
, 1);
446 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.roce_en
,
449 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
456 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev
*mdev
)
458 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_ENABLED
);
460 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce
);
462 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev
*mdev
)
464 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_DISABLED
);
466 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce
);