/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include "mlx5_core.h"
39 static int _mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
40 u16 vport
, u32
*out
, int outlen
)
42 u32 in
[MLX5_ST_SZ_DW(query_vport_state_in
)] = {0};
44 MLX5_SET(query_vport_state_in
, in
, opcode
,
45 MLX5_CMD_OP_QUERY_VPORT_STATE
);
46 MLX5_SET(query_vport_state_in
, in
, op_mod
, opmod
);
47 MLX5_SET(query_vport_state_in
, in
, vport_number
, vport
);
49 MLX5_SET(query_vport_state_in
, in
, other_vport
, 1);
51 return mlx5_cmd_exec(mdev
, in
, sizeof(in
), out
, outlen
);
54 u8
mlx5_query_vport_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
56 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
58 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
60 return MLX5_GET(query_vport_state_out
, out
, state
);
62 EXPORT_SYMBOL_GPL(mlx5_query_vport_state
);
64 u8
mlx5_query_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
, u16 vport
)
66 u32 out
[MLX5_ST_SZ_DW(query_vport_state_out
)] = {0};
68 _mlx5_query_vport_state(mdev
, opmod
, vport
, out
, sizeof(out
));
70 return MLX5_GET(query_vport_state_out
, out
, admin_state
);
72 EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state
);
74 int mlx5_modify_vport_admin_state(struct mlx5_core_dev
*mdev
, u8 opmod
,
77 u32 in
[MLX5_ST_SZ_DW(modify_vport_state_in
)] = {0};
78 u32 out
[MLX5_ST_SZ_DW(modify_vport_state_out
)] = {0};
80 MLX5_SET(modify_vport_state_in
, in
, opcode
,
81 MLX5_CMD_OP_MODIFY_VPORT_STATE
);
82 MLX5_SET(modify_vport_state_in
, in
, op_mod
, opmod
);
83 MLX5_SET(modify_vport_state_in
, in
, vport_number
, vport
);
85 MLX5_SET(modify_vport_state_in
, in
, other_vport
, 1);
86 MLX5_SET(modify_vport_state_in
, in
, admin_state
, state
);
88 return mlx5_cmd_exec(mdev
, in
, sizeof(in
), out
, sizeof(out
));
90 EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state
);
92 static int mlx5_query_nic_vport_context(struct mlx5_core_dev
*mdev
, u16 vport
,
95 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)] = {0};
97 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
98 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
99 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
101 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
103 return mlx5_cmd_exec(mdev
, in
, sizeof(in
), out
, outlen
);
106 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev
*mdev
, void *in
,
109 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)] = {0};
111 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
112 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
113 return mlx5_cmd_exec(mdev
, in
, inlen
, out
, sizeof(out
));
116 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev
*mdev
,
117 u16 vport
, u8
*min_inline
)
119 u32 out
[MLX5_ST_SZ_DW(query_nic_vport_context_out
)] = {0};
122 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, sizeof(out
));
124 *min_inline
= MLX5_GET(query_nic_vport_context_out
, out
,
125 nic_vport_context
.min_wqe_inline_mode
);
128 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline
);
130 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev
*mdev
,
131 u16 vport
, u8 min_inline
)
133 u32 in
[MLX5_ST_SZ_DW(modify_nic_vport_context_in
)] = {0};
134 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
137 MLX5_SET(modify_nic_vport_context_in
, in
,
138 field_select
.min_inline
, 1);
139 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
140 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, 1);
142 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
143 in
, nic_vport_context
);
144 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
145 min_wqe_inline_mode
, min_inline
);
147 return mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
150 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
154 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
158 out
= mlx5_vzalloc(outlen
);
162 out_addr
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
163 nic_vport_context
.permanent_address
);
165 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
167 ether_addr_copy(addr
, &out_addr
[2]);
172 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address
);
174 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev
*mdev
,
178 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
183 in
= mlx5_vzalloc(inlen
);
185 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
189 MLX5_SET(modify_nic_vport_context_in
, in
,
190 field_select
.permanent_address
, 1);
191 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
194 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, 1);
196 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
197 in
, nic_vport_context
);
198 perm_mac
= MLX5_ADDR_OF(nic_vport_context
, nic_vport_ctx
,
201 ether_addr_copy(&perm_mac
[2], addr
);
203 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
209 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address
);
211 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16
*mtu
)
213 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
217 out
= mlx5_vzalloc(outlen
);
221 err
= mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
223 *mtu
= MLX5_GET(query_nic_vport_context_out
, out
,
224 nic_vport_context
.mtu
);
229 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu
);
231 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev
*mdev
, u16 mtu
)
233 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
237 in
= mlx5_vzalloc(inlen
);
241 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.mtu
, 1);
242 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.mtu
, mtu
);
244 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
249 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu
);
251 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
253 enum mlx5_list_type list_type
,
254 u8 addr_list
[][ETH_ALEN
],
257 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)] = {0};
266 req_list_size
= *list_size
;
268 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
269 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
270 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
272 if (req_list_size
> max_list_size
) {
273 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max_list_size\n",
274 req_list_size
, max_list_size
);
275 req_list_size
= max_list_size
;
278 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
279 req_list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
281 out
= kzalloc(out_sz
, GFP_KERNEL
);
285 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
286 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
287 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
, list_type
);
288 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
291 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
293 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
297 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
299 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
302 *list_size
= req_list_size
;
303 for (i
= 0; i
< req_list_size
; i
++) {
304 u8
*mac_addr
= MLX5_ADDR_OF(nic_vport_context
,
306 current_uc_mac_address
[i
]) + 2;
307 ether_addr_copy(addr_list
[i
], mac_addr
);
313 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list
);
315 int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev
*dev
,
316 enum mlx5_list_type list_type
,
317 u8 addr_list
[][ETH_ALEN
],
320 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
328 max_list_size
= list_type
== MLX5_NVPRT_LIST_TYPE_UC
?
329 1 << MLX5_CAP_GEN(dev
, log_max_current_uc_list
) :
330 1 << MLX5_CAP_GEN(dev
, log_max_current_mc_list
);
332 if (list_size
> max_list_size
)
335 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
336 list_size
* MLX5_ST_SZ_BYTES(mac_address_layout
);
338 memset(out
, 0, sizeof(out
));
339 in
= kzalloc(in_sz
, GFP_KERNEL
);
343 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
344 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
345 MLX5_SET(modify_nic_vport_context_in
, in
,
346 field_select
.addresses_list
, 1);
348 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
351 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
352 allowed_list_type
, list_type
);
353 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
354 allowed_list_size
, list_size
);
356 for (i
= 0; i
< list_size
; i
++) {
357 u8
*curr_mac
= MLX5_ADDR_OF(nic_vport_context
,
359 current_uc_mac_address
[i
]) + 2;
360 ether_addr_copy(curr_mac
, addr_list
[i
]);
363 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, sizeof(out
));
367 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list
);
369 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev
*dev
,
374 u32 in
[MLX5_ST_SZ_DW(query_nic_vport_context_in
)];
383 req_list_size
= *size
;
384 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
385 if (req_list_size
> max_list_size
) {
386 mlx5_core_warn(dev
, "Requested list size (%d) > (%d) max list size\n",
387 req_list_size
, max_list_size
);
388 req_list_size
= max_list_size
;
391 out_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
392 req_list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
394 memset(in
, 0, sizeof(in
));
395 out
= kzalloc(out_sz
, GFP_KERNEL
);
399 MLX5_SET(query_nic_vport_context_in
, in
, opcode
,
400 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
);
401 MLX5_SET(query_nic_vport_context_in
, in
, allowed_list_type
,
402 MLX5_NVPRT_LIST_TYPE_VLAN
);
403 MLX5_SET(query_nic_vport_context_in
, in
, vport_number
, vport
);
406 MLX5_SET(query_nic_vport_context_in
, in
, other_vport
, 1);
408 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
412 nic_vport_ctx
= MLX5_ADDR_OF(query_nic_vport_context_out
, out
,
414 req_list_size
= MLX5_GET(nic_vport_context
, nic_vport_ctx
,
417 *size
= req_list_size
;
418 for (i
= 0; i
< req_list_size
; i
++) {
419 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
421 current_uc_mac_address
[i
]);
422 vlans
[i
] = MLX5_GET(vlan_layout
, vlan_addr
, vlan
);
428 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans
);
430 int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev
*dev
,
434 u32 out
[MLX5_ST_SZ_DW(modify_nic_vport_context_out
)];
442 max_list_size
= 1 << MLX5_CAP_GEN(dev
, log_max_vlan_list
);
444 if (list_size
> max_list_size
)
447 in_sz
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
) +
448 list_size
* MLX5_ST_SZ_BYTES(vlan_layout
);
450 memset(out
, 0, sizeof(out
));
451 in
= kzalloc(in_sz
, GFP_KERNEL
);
455 MLX5_SET(modify_nic_vport_context_in
, in
, opcode
,
456 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
);
457 MLX5_SET(modify_nic_vport_context_in
, in
,
458 field_select
.addresses_list
, 1);
460 nic_vport_ctx
= MLX5_ADDR_OF(modify_nic_vport_context_in
, in
,
463 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
464 allowed_list_type
, MLX5_NVPRT_LIST_TYPE_VLAN
);
465 MLX5_SET(nic_vport_context
, nic_vport_ctx
,
466 allowed_list_size
, list_size
);
468 for (i
= 0; i
< list_size
; i
++) {
469 void *vlan_addr
= MLX5_ADDR_OF(nic_vport_context
,
471 current_uc_mac_address
[i
]);
472 MLX5_SET(vlan_layout
, vlan_addr
, vlan
, vlans
[i
]);
475 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, sizeof(out
));
479 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans
);
481 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev
*mdev
,
482 u64
*system_image_guid
)
485 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
487 out
= mlx5_vzalloc(outlen
);
491 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
493 *system_image_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
494 nic_vport_context
.system_image_guid
);
500 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid
);
502 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev
*mdev
, u64
*node_guid
)
505 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
507 out
= mlx5_vzalloc(outlen
);
511 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
513 *node_guid
= MLX5_GET64(query_nic_vport_context_out
, out
,
514 nic_vport_context
.node_guid
);
520 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid
);
522 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev
*mdev
,
523 u32 vport
, u64 node_guid
)
525 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
526 void *nic_vport_context
;
532 if (!MLX5_CAP_GEN(mdev
, vport_group_manager
))
534 if (!MLX5_CAP_ESW(mdev
, nic_vport_node_guid_modify
))
537 in
= mlx5_vzalloc(inlen
);
541 MLX5_SET(modify_nic_vport_context_in
, in
,
542 field_select
.node_guid
, 1);
543 MLX5_SET(modify_nic_vport_context_in
, in
, vport_number
, vport
);
544 MLX5_SET(modify_nic_vport_context_in
, in
, other_vport
, !!vport
);
546 nic_vport_context
= MLX5_ADDR_OF(modify_nic_vport_context_in
,
547 in
, nic_vport_context
);
548 MLX5_SET64(nic_vport_context
, nic_vport_context
, node_guid
, node_guid
);
550 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
557 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev
*mdev
,
561 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
563 out
= mlx5_vzalloc(outlen
);
567 mlx5_query_nic_vport_context(mdev
, 0, out
, outlen
);
569 *qkey_viol_cntr
= MLX5_GET(query_nic_vport_context_out
, out
,
570 nic_vport_context
.qkey_violation_counter
);
576 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr
);
578 int mlx5_query_hca_vport_gid(struct mlx5_core_dev
*dev
, u8 other_vport
,
579 u8 port_num
, u16 vf_num
, u16 gid_index
,
582 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_in
);
583 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
584 int is_group_manager
;
592 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
593 tbsz
= mlx5_get_gid_table_len(MLX5_CAP_GEN(dev
, gid_table_size
));
594 mlx5_core_dbg(dev
, "vf_num %d, index %d, gid_table_size %d\n",
595 vf_num
, gid_index
, tbsz
);
597 if (gid_index
> tbsz
&& gid_index
!= 0xffff)
600 if (gid_index
== 0xffff)
605 out_sz
+= nout
* sizeof(*gid
);
607 in
= kzalloc(in_sz
, GFP_KERNEL
);
608 out
= kzalloc(out_sz
, GFP_KERNEL
);
614 MLX5_SET(query_hca_vport_gid_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_GID
);
616 if (is_group_manager
) {
617 MLX5_SET(query_hca_vport_gid_in
, in
, vport_number
, vf_num
);
618 MLX5_SET(query_hca_vport_gid_in
, in
, other_vport
, 1);
624 MLX5_SET(query_hca_vport_gid_in
, in
, gid_index
, gid_index
);
626 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
627 MLX5_SET(query_hca_vport_gid_in
, in
, port_num
, port_num
);
629 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
633 tmp
= out
+ MLX5_ST_SZ_BYTES(query_hca_vport_gid_out
);
634 gid
->global
.subnet_prefix
= tmp
->global
.subnet_prefix
;
635 gid
->global
.interface_id
= tmp
->global
.interface_id
;
642 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid
);
644 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev
*dev
, u8 other_vport
,
645 u8 port_num
, u16 vf_num
, u16 pkey_index
,
648 int in_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in
);
649 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out
);
650 int is_group_manager
;
659 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
661 tbsz
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev
, pkey_table_size
));
662 if (pkey_index
> tbsz
&& pkey_index
!= 0xffff)
665 if (pkey_index
== 0xffff)
670 out_sz
+= nout
* MLX5_ST_SZ_BYTES(pkey
);
672 in
= kzalloc(in_sz
, GFP_KERNEL
);
673 out
= kzalloc(out_sz
, GFP_KERNEL
);
679 MLX5_SET(query_hca_vport_pkey_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
);
681 if (is_group_manager
) {
682 MLX5_SET(query_hca_vport_pkey_in
, in
, vport_number
, vf_num
);
683 MLX5_SET(query_hca_vport_pkey_in
, in
, other_vport
, 1);
689 MLX5_SET(query_hca_vport_pkey_in
, in
, pkey_index
, pkey_index
);
691 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
692 MLX5_SET(query_hca_vport_pkey_in
, in
, port_num
, port_num
);
694 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
698 pkarr
= MLX5_ADDR_OF(query_hca_vport_pkey_out
, out
, pkey
);
699 for (i
= 0; i
< nout
; i
++, pkey
++, pkarr
+= MLX5_ST_SZ_BYTES(pkey
))
700 *pkey
= MLX5_GET_PR(pkey
, pkarr
, pkey
);
707 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey
);
709 int mlx5_query_hca_vport_context(struct mlx5_core_dev
*dev
,
710 u8 other_vport
, u8 port_num
,
712 struct mlx5_hca_vport_context
*rep
)
714 int out_sz
= MLX5_ST_SZ_BYTES(query_hca_vport_context_out
);
715 int in
[MLX5_ST_SZ_DW(query_hca_vport_context_in
)] = {0};
716 int is_group_manager
;
721 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
723 out
= kzalloc(out_sz
, GFP_KERNEL
);
727 MLX5_SET(query_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
);
730 if (is_group_manager
) {
731 MLX5_SET(query_hca_vport_context_in
, in
, other_vport
, 1);
732 MLX5_SET(query_hca_vport_context_in
, in
, vport_number
, vf_num
);
739 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
740 MLX5_SET(query_hca_vport_context_in
, in
, port_num
, port_num
);
742 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_sz
);
746 ctx
= MLX5_ADDR_OF(query_hca_vport_context_out
, out
, hca_vport_context
);
747 rep
->field_select
= MLX5_GET_PR(hca_vport_context
, ctx
, field_select
);
748 rep
->sm_virt_aware
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_virt_aware
);
749 rep
->has_smi
= MLX5_GET_PR(hca_vport_context
, ctx
, has_smi
);
750 rep
->has_raw
= MLX5_GET_PR(hca_vport_context
, ctx
, has_raw
);
751 rep
->policy
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state_policy
);
752 rep
->phys_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
753 port_physical_state
);
754 rep
->vport_state
= MLX5_GET_PR(hca_vport_context
, ctx
, vport_state
);
755 rep
->port_physical_state
= MLX5_GET_PR(hca_vport_context
, ctx
,
756 port_physical_state
);
757 rep
->port_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, port_guid
);
758 rep
->node_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
, node_guid
);
759 rep
->cap_mask1
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask1
);
760 rep
->cap_mask1_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
761 cap_mask1_field_select
);
762 rep
->cap_mask2
= MLX5_GET_PR(hca_vport_context
, ctx
, cap_mask2
);
763 rep
->cap_mask2_perm
= MLX5_GET_PR(hca_vport_context
, ctx
,
764 cap_mask2_field_select
);
765 rep
->lid
= MLX5_GET_PR(hca_vport_context
, ctx
, lid
);
766 rep
->init_type_reply
= MLX5_GET_PR(hca_vport_context
, ctx
,
768 rep
->lmc
= MLX5_GET_PR(hca_vport_context
, ctx
, lmc
);
769 rep
->subnet_timeout
= MLX5_GET_PR(hca_vport_context
, ctx
,
771 rep
->sm_lid
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_lid
);
772 rep
->sm_sl
= MLX5_GET_PR(hca_vport_context
, ctx
, sm_sl
);
773 rep
->qkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
774 qkey_violation_counter
);
775 rep
->pkey_violation_counter
= MLX5_GET_PR(hca_vport_context
, ctx
,
776 pkey_violation_counter
);
777 rep
->grh_required
= MLX5_GET_PR(hca_vport_context
, ctx
, grh_required
);
778 rep
->sys_image_guid
= MLX5_GET64_PR(hca_vport_context
, ctx
,
785 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context
);
787 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev
*dev
,
790 struct mlx5_hca_vport_context
*rep
;
793 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
797 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
799 *sys_image_guid
= rep
->sys_image_guid
;
804 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid
);
806 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev
*dev
,
809 struct mlx5_hca_vport_context
*rep
;
812 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
816 err
= mlx5_query_hca_vport_context(dev
, 0, 1, 0, rep
);
818 *node_guid
= rep
->node_guid
;
823 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid
);
825 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
832 int outlen
= MLX5_ST_SZ_BYTES(query_nic_vport_context_out
);
835 out
= kzalloc(outlen
, GFP_KERNEL
);
839 err
= mlx5_query_nic_vport_context(mdev
, vport
, out
, outlen
);
843 *promisc_uc
= MLX5_GET(query_nic_vport_context_out
, out
,
844 nic_vport_context
.promisc_uc
);
845 *promisc_mc
= MLX5_GET(query_nic_vport_context_out
, out
,
846 nic_vport_context
.promisc_mc
);
847 *promisc_all
= MLX5_GET(query_nic_vport_context_out
, out
,
848 nic_vport_context
.promisc_all
);
854 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc
);
856 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev
*mdev
,
862 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
865 in
= mlx5_vzalloc(inlen
);
867 mlx5_core_err(mdev
, "failed to allocate inbox\n");
871 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.promisc
, 1);
872 MLX5_SET(modify_nic_vport_context_in
, in
,
873 nic_vport_context
.promisc_uc
, promisc_uc
);
874 MLX5_SET(modify_nic_vport_context_in
, in
,
875 nic_vport_context
.promisc_mc
, promisc_mc
);
876 MLX5_SET(modify_nic_vport_context_in
, in
,
877 nic_vport_context
.promisc_all
, promisc_all
);
879 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
885 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc
);
/* RoCE enablement values written to nic_vport_context.roce_en */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
892 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev
*mdev
,
893 enum mlx5_vport_roce_state state
)
896 int inlen
= MLX5_ST_SZ_BYTES(modify_nic_vport_context_in
);
899 in
= mlx5_vzalloc(inlen
);
901 mlx5_core_warn(mdev
, "failed to allocate inbox\n");
905 MLX5_SET(modify_nic_vport_context_in
, in
, field_select
.roce_en
, 1);
906 MLX5_SET(modify_nic_vport_context_in
, in
, nic_vport_context
.roce_en
,
909 err
= mlx5_modify_nic_vport_context(mdev
, in
, inlen
);
916 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev
*mdev
)
918 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_ENABLED
);
920 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce
);
922 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev
*mdev
)
924 return mlx5_nic_vport_update_roce_state(mdev
, MLX5_VPORT_ROCE_DISABLED
);
926 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce
);
928 int mlx5_core_query_vport_counter(struct mlx5_core_dev
*dev
, u8 other_vport
,
929 int vf
, u8 port_num
, void *out
,
932 int in_sz
= MLX5_ST_SZ_BYTES(query_vport_counter_in
);
933 int is_group_manager
;
937 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
938 in
= mlx5_vzalloc(in_sz
);
944 MLX5_SET(query_vport_counter_in
, in
, opcode
,
945 MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
947 if (is_group_manager
) {
948 MLX5_SET(query_vport_counter_in
, in
, other_vport
, 1);
949 MLX5_SET(query_vport_counter_in
, in
, vport_number
, vf
+ 1);
955 if (MLX5_CAP_GEN(dev
, num_ports
) == 2)
956 MLX5_SET(query_vport_counter_in
, in
, port_num
, port_num
);
958 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, out_sz
);
963 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter
);
965 int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev
*dev
,
966 u8 other_vport
, u8 port_num
,
968 struct mlx5_hca_vport_context
*req
)
970 int in_sz
= MLX5_ST_SZ_BYTES(modify_hca_vport_context_in
);
971 u8 out
[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out
)];
972 int is_group_manager
;
977 mlx5_core_dbg(dev
, "vf %d\n", vf
);
978 is_group_manager
= MLX5_CAP_GEN(dev
, vport_group_manager
);
979 in
= kzalloc(in_sz
, GFP_KERNEL
);
983 memset(out
, 0, sizeof(out
));
984 MLX5_SET(modify_hca_vport_context_in
, in
, opcode
, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT
);
986 if (is_group_manager
) {
987 MLX5_SET(modify_hca_vport_context_in
, in
, other_vport
, 1);
988 MLX5_SET(modify_hca_vport_context_in
, in
, vport_number
, vf
);
995 if (MLX5_CAP_GEN(dev
, num_ports
) > 1)
996 MLX5_SET(modify_hca_vport_context_in
, in
, port_num
, port_num
);
998 ctx
= MLX5_ADDR_OF(modify_hca_vport_context_in
, in
, hca_vport_context
);
999 MLX5_SET(hca_vport_context
, ctx
, field_select
, req
->field_select
);
1000 MLX5_SET(hca_vport_context
, ctx
, sm_virt_aware
, req
->sm_virt_aware
);
1001 MLX5_SET(hca_vport_context
, ctx
, has_smi
, req
->has_smi
);
1002 MLX5_SET(hca_vport_context
, ctx
, has_raw
, req
->has_raw
);
1003 MLX5_SET(hca_vport_context
, ctx
, vport_state_policy
, req
->policy
);
1004 MLX5_SET(hca_vport_context
, ctx
, port_physical_state
, req
->phys_state
);
1005 MLX5_SET(hca_vport_context
, ctx
, vport_state
, req
->vport_state
);
1006 MLX5_SET64(hca_vport_context
, ctx
, port_guid
, req
->port_guid
);
1007 MLX5_SET64(hca_vport_context
, ctx
, node_guid
, req
->node_guid
);
1008 MLX5_SET(hca_vport_context
, ctx
, cap_mask1
, req
->cap_mask1
);
1009 MLX5_SET(hca_vport_context
, ctx
, cap_mask1_field_select
, req
->cap_mask1_perm
);
1010 MLX5_SET(hca_vport_context
, ctx
, cap_mask2
, req
->cap_mask2
);
1011 MLX5_SET(hca_vport_context
, ctx
, cap_mask2_field_select
, req
->cap_mask2_perm
);
1012 MLX5_SET(hca_vport_context
, ctx
, lid
, req
->lid
);
1013 MLX5_SET(hca_vport_context
, ctx
, init_type_reply
, req
->init_type_reply
);
1014 MLX5_SET(hca_vport_context
, ctx
, lmc
, req
->lmc
);
1015 MLX5_SET(hca_vport_context
, ctx
, subnet_timeout
, req
->subnet_timeout
);
1016 MLX5_SET(hca_vport_context
, ctx
, sm_lid
, req
->sm_lid
);
1017 MLX5_SET(hca_vport_context
, ctx
, sm_sl
, req
->sm_sl
);
1018 MLX5_SET(hca_vport_context
, ctx
, qkey_violation_counter
, req
->qkey_violation_counter
);
1019 MLX5_SET(hca_vport_context
, ctx
, pkey_violation_counter
, req
->pkey_violation_counter
);
1020 err
= mlx5_cmd_exec(dev
, in
, in_sz
, out
, sizeof(out
));
1025 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context
);