2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/if_vlan.h>
36 #include <linux/export.h>
38 #include <linux/mlx4/cmd.h>
41 #include "mlx4_stats.h"
43 #define MLX4_MAC_VALID (1ull << 63)
45 #define MLX4_VLAN_VALID (1u << 31)
46 #define MLX4_VLAN_MASK 0xfff
48 void mlx4_init_mac_table(struct mlx4_dev
*dev
, struct mlx4_mac_table
*table
)
52 mutex_init(&table
->mutex
);
53 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
54 table
->entries
[i
] = 0;
57 table
->max
= 1 << dev
->caps
.log_num_macs
;
61 void mlx4_init_vlan_table(struct mlx4_dev
*dev
, struct mlx4_vlan_table
*table
)
65 mutex_init(&table
->mutex
);
66 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; i
++) {
67 table
->entries
[i
] = 0;
70 table
->max
= (1 << dev
->caps
.log_num_vlans
) - MLX4_VLAN_REGULAR
;
74 void mlx4_init_roce_gid_table(struct mlx4_dev
*dev
,
75 struct mlx4_roce_gid_table
*table
)
79 mutex_init(&table
->mutex
);
80 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++)
81 memset(table
->roce_gids
[i
].raw
, 0, MLX4_ROCE_GID_ENTRY_SIZE
);
84 static int validate_index(struct mlx4_dev
*dev
,
85 struct mlx4_mac_table
*table
, int index
)
89 if (index
< 0 || index
>= table
->max
|| !table
->entries
[index
]) {
90 mlx4_warn(dev
, "No valid Mac entry for the given index\n");
96 static int find_index(struct mlx4_dev
*dev
,
97 struct mlx4_mac_table
*table
, u64 mac
)
101 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
102 if (table
->refs
[i
] &&
103 (MLX4_MAC_MASK
& mac
) ==
104 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
])))
111 static int mlx4_set_port_mac_table(struct mlx4_dev
*dev
, u8 port
,
114 struct mlx4_cmd_mailbox
*mailbox
;
118 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
120 return PTR_ERR(mailbox
);
122 memcpy(mailbox
->buf
, entries
, MLX4_MAC_TABLE_SIZE
);
124 in_mod
= MLX4_SET_PORT_MAC_TABLE
<< 8 | port
;
126 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
127 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
129 mlx4_free_cmd_mailbox(dev
, mailbox
);
133 int mlx4_find_cached_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
, int *idx
)
135 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
136 struct mlx4_mac_table
*table
= &info
->mac_table
;
139 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
143 if (mac
== (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
151 EXPORT_SYMBOL_GPL(mlx4_find_cached_mac
);
153 int __mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
155 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
156 struct mlx4_mac_table
*table
= &info
->mac_table
;
160 mlx4_dbg(dev
, "Registering MAC: 0x%llx for port %d\n",
161 (unsigned long long) mac
, port
);
163 mutex_lock(&table
->mutex
);
164 for (i
= 0; i
< MLX4_MAX_MAC_NUM
; i
++) {
165 if (!table
->refs
[i
]) {
171 if ((MLX4_MAC_MASK
& mac
) ==
172 (MLX4_MAC_MASK
& be64_to_cpu(table
->entries
[i
]))) {
173 /* MAC already registered, increment ref count */
180 mlx4_dbg(dev
, "Free MAC index is %d\n", free
);
182 if (table
->total
== table
->max
) {
183 /* No free mac entries */
188 /* Register new MAC */
189 table
->entries
[free
] = cpu_to_be64(mac
| MLX4_MAC_VALID
);
191 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
193 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
194 (unsigned long long) mac
);
195 table
->entries
[free
] = 0;
198 table
->refs
[free
] = 1;
202 mutex_unlock(&table
->mutex
);
205 EXPORT_SYMBOL_GPL(__mlx4_register_mac
);
207 int mlx4_register_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
212 if (mlx4_is_mfunc(dev
)) {
213 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
214 err
= mlx4_cmd_imm(dev
, mac
, &out_param
,
215 ((u32
) port
) << 8 | (u32
) RES_MAC
,
216 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
217 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
219 if (err
&& err
== -EINVAL
&& mlx4_is_slave(dev
)) {
220 /* retry using old REG_MAC format */
221 set_param_l(&out_param
, port
);
222 err
= mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
223 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
224 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
226 dev
->flags
|= MLX4_FLAG_OLD_REG_MAC
;
231 return get_param_l(&out_param
);
233 return __mlx4_register_mac(dev
, port
, mac
);
235 EXPORT_SYMBOL_GPL(mlx4_register_mac
);
237 int mlx4_get_base_qpn(struct mlx4_dev
*dev
, u8 port
)
239 return dev
->caps
.reserved_qps_base
[MLX4_QP_REGION_ETH_ADDR
] +
240 (port
- 1) * (1 << dev
->caps
.log_num_macs
);
242 EXPORT_SYMBOL_GPL(mlx4_get_base_qpn
);
244 void __mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
246 struct mlx4_port_info
*info
;
247 struct mlx4_mac_table
*table
;
250 if (port
< 1 || port
> dev
->caps
.num_ports
) {
251 mlx4_warn(dev
, "invalid port number (%d), aborting...\n", port
);
254 info
= &mlx4_priv(dev
)->port
[port
];
255 table
= &info
->mac_table
;
256 mutex_lock(&table
->mutex
);
257 index
= find_index(dev
, table
, mac
);
259 if (validate_index(dev
, table
, index
))
261 if (--table
->refs
[index
]) {
262 mlx4_dbg(dev
, "Have more references for index %d, no need to modify mac table\n",
267 table
->entries
[index
] = 0;
268 mlx4_set_port_mac_table(dev
, port
, table
->entries
);
271 mutex_unlock(&table
->mutex
);
273 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac
);
275 void mlx4_unregister_mac(struct mlx4_dev
*dev
, u8 port
, u64 mac
)
279 if (mlx4_is_mfunc(dev
)) {
280 if (!(dev
->flags
& MLX4_FLAG_OLD_REG_MAC
)) {
281 (void) mlx4_cmd_imm(dev
, mac
, &out_param
,
282 ((u32
) port
) << 8 | (u32
) RES_MAC
,
283 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
284 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
286 /* use old unregister mac format */
287 set_param_l(&out_param
, port
);
288 (void) mlx4_cmd_imm(dev
, mac
, &out_param
, RES_MAC
,
289 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_FREE_RES
,
290 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
294 __mlx4_unregister_mac(dev
, port
, mac
);
297 EXPORT_SYMBOL_GPL(mlx4_unregister_mac
);
299 int __mlx4_replace_mac(struct mlx4_dev
*dev
, u8 port
, int qpn
, u64 new_mac
)
301 struct mlx4_port_info
*info
= &mlx4_priv(dev
)->port
[port
];
302 struct mlx4_mac_table
*table
= &info
->mac_table
;
303 int index
= qpn
- info
->base_qpn
;
306 /* CX1 doesn't support multi-functions */
307 mutex_lock(&table
->mutex
);
309 err
= validate_index(dev
, table
, index
);
313 table
->entries
[index
] = cpu_to_be64(new_mac
| MLX4_MAC_VALID
);
315 err
= mlx4_set_port_mac_table(dev
, port
, table
->entries
);
317 mlx4_err(dev
, "Failed adding MAC: 0x%llx\n",
318 (unsigned long long) new_mac
);
319 table
->entries
[index
] = 0;
322 mutex_unlock(&table
->mutex
);
325 EXPORT_SYMBOL_GPL(__mlx4_replace_mac
);
327 static int mlx4_set_port_vlan_table(struct mlx4_dev
*dev
, u8 port
,
330 struct mlx4_cmd_mailbox
*mailbox
;
334 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
336 return PTR_ERR(mailbox
);
338 memcpy(mailbox
->buf
, entries
, MLX4_VLAN_TABLE_SIZE
);
339 in_mod
= MLX4_SET_PORT_VLAN_TABLE
<< 8 | port
;
340 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
341 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
343 mlx4_free_cmd_mailbox(dev
, mailbox
);
348 int mlx4_find_cached_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vid
, int *idx
)
350 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
353 for (i
= 0; i
< MLX4_MAX_VLAN_NUM
; ++i
) {
354 if (table
->refs
[i
] &&
355 (vid
== (MLX4_VLAN_MASK
&
356 be32_to_cpu(table
->entries
[i
])))) {
357 /* VLAN already registered, increase reference count */
365 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan
);
367 int __mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
,
370 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
374 mutex_lock(&table
->mutex
);
376 if (table
->total
== table
->max
) {
377 /* No free vlan entries */
382 for (i
= MLX4_VLAN_REGULAR
; i
< MLX4_MAX_VLAN_NUM
; i
++) {
383 if (free
< 0 && (table
->refs
[i
] == 0)) {
388 if (table
->refs
[i
] &&
389 (vlan
== (MLX4_VLAN_MASK
&
390 be32_to_cpu(table
->entries
[i
])))) {
391 /* Vlan already registered, increase references count */
403 /* Register new VLAN */
404 table
->refs
[free
] = 1;
405 table
->entries
[free
] = cpu_to_be32(vlan
| MLX4_VLAN_VALID
);
407 err
= mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
409 mlx4_warn(dev
, "Failed adding vlan: %u\n", vlan
);
410 table
->refs
[free
] = 0;
411 table
->entries
[free
] = 0;
418 mutex_unlock(&table
->mutex
);
422 int mlx4_register_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
, int *index
)
430 if (mlx4_is_mfunc(dev
)) {
431 err
= mlx4_cmd_imm(dev
, vlan
, &out_param
,
432 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
433 RES_OP_RESERVE_AND_MAP
, MLX4_CMD_ALLOC_RES
,
434 MLX4_CMD_TIME_CLASS_A
, MLX4_CMD_WRAPPED
);
436 *index
= get_param_l(&out_param
);
440 return __mlx4_register_vlan(dev
, port
, vlan
, index
);
442 EXPORT_SYMBOL_GPL(mlx4_register_vlan
);
444 void __mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
446 struct mlx4_vlan_table
*table
= &mlx4_priv(dev
)->port
[port
].vlan_table
;
449 mutex_lock(&table
->mutex
);
450 if (mlx4_find_cached_vlan(dev
, port
, vlan
, &index
)) {
451 mlx4_warn(dev
, "vlan 0x%x is not in the vlan table\n", vlan
);
455 if (index
< MLX4_VLAN_REGULAR
) {
456 mlx4_warn(dev
, "Trying to free special vlan index %d\n", index
);
460 if (--table
->refs
[index
]) {
461 mlx4_dbg(dev
, "Have %d more references for index %d, no need to modify vlan table\n",
462 table
->refs
[index
], index
);
465 table
->entries
[index
] = 0;
466 mlx4_set_port_vlan_table(dev
, port
, table
->entries
);
469 mutex_unlock(&table
->mutex
);
472 void mlx4_unregister_vlan(struct mlx4_dev
*dev
, u8 port
, u16 vlan
)
476 if (mlx4_is_mfunc(dev
)) {
477 (void) mlx4_cmd_imm(dev
, vlan
, &out_param
,
478 ((u32
) port
) << 8 | (u32
) RES_VLAN
,
479 RES_OP_RESERVE_AND_MAP
,
480 MLX4_CMD_FREE_RES
, MLX4_CMD_TIME_CLASS_A
,
484 __mlx4_unregister_vlan(dev
, port
, vlan
);
486 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan
);
488 int mlx4_get_port_ib_caps(struct mlx4_dev
*dev
, u8 port
, __be32
*caps
)
490 struct mlx4_cmd_mailbox
*inmailbox
, *outmailbox
;
494 inmailbox
= mlx4_alloc_cmd_mailbox(dev
);
495 if (IS_ERR(inmailbox
))
496 return PTR_ERR(inmailbox
);
498 outmailbox
= mlx4_alloc_cmd_mailbox(dev
);
499 if (IS_ERR(outmailbox
)) {
500 mlx4_free_cmd_mailbox(dev
, inmailbox
);
501 return PTR_ERR(outmailbox
);
504 inbuf
= inmailbox
->buf
;
505 outbuf
= outmailbox
->buf
;
510 *(__be16
*) (&inbuf
[16]) = cpu_to_be16(0x0015);
511 *(__be32
*) (&inbuf
[20]) = cpu_to_be32(port
);
513 err
= mlx4_cmd_box(dev
, inmailbox
->dma
, outmailbox
->dma
, port
, 3,
514 MLX4_CMD_MAD_IFC
, MLX4_CMD_TIME_CLASS_C
,
517 *caps
= *(__be32
*) (outbuf
+ 84);
518 mlx4_free_cmd_mailbox(dev
, inmailbox
);
519 mlx4_free_cmd_mailbox(dev
, outmailbox
);
522 static struct mlx4_roce_gid_entry zgid_entry
;
524 int mlx4_get_slave_num_gids(struct mlx4_dev
*dev
, int slave
, int port
)
527 int slave_gid
= slave
;
529 struct mlx4_slaves_pport slaves_pport
;
530 struct mlx4_active_ports actv_ports
;
531 unsigned max_port_p_one
;
534 return MLX4_ROCE_PF_GIDS
;
537 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
538 actv_ports
= mlx4_get_active_ports(dev
, slave
);
539 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
540 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
542 for (i
= 1; i
< max_port_p_one
; i
++) {
543 struct mlx4_active_ports exclusive_ports
;
544 struct mlx4_slaves_pport slaves_pport_actv
;
545 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
546 set_bit(i
- 1, exclusive_ports
.ports
);
549 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
550 dev
, &exclusive_ports
);
551 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
552 dev
->persist
->num_vfs
+ 1);
554 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->persist
->num_vfs
+ 1) - 1;
555 if (slave_gid
<= ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) % vfs
))
556 return ((MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
) + 1;
557 return (MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
) / vfs
;
560 int mlx4_get_base_gid_ix(struct mlx4_dev
*dev
, int slave
, int port
)
564 int slave_gid
= slave
;
567 struct mlx4_slaves_pport slaves_pport
;
568 struct mlx4_active_ports actv_ports
;
569 unsigned max_port_p_one
;
574 slaves_pport
= mlx4_phys_to_slaves_pport(dev
, port
);
575 actv_ports
= mlx4_get_active_ports(dev
, slave
);
576 max_port_p_one
= find_first_bit(actv_ports
.ports
, dev
->caps
.num_ports
) +
577 bitmap_weight(actv_ports
.ports
, dev
->caps
.num_ports
) + 1;
579 for (i
= 1; i
< max_port_p_one
; i
++) {
580 struct mlx4_active_ports exclusive_ports
;
581 struct mlx4_slaves_pport slaves_pport_actv
;
582 bitmap_zero(exclusive_ports
.ports
, dev
->caps
.num_ports
);
583 set_bit(i
- 1, exclusive_ports
.ports
);
586 slaves_pport_actv
= mlx4_phys_to_slaves_pport_actv(
587 dev
, &exclusive_ports
);
588 slave_gid
-= bitmap_weight(slaves_pport_actv
.slaves
,
589 dev
->persist
->num_vfs
+ 1);
591 gids
= MLX4_ROCE_MAX_GIDS
- MLX4_ROCE_PF_GIDS
;
592 vfs
= bitmap_weight(slaves_pport
.slaves
, dev
->persist
->num_vfs
+ 1) - 1;
593 if (slave_gid
<= gids
% vfs
)
594 return MLX4_ROCE_PF_GIDS
+ ((gids
/ vfs
) + 1) * (slave_gid
- 1);
596 return MLX4_ROCE_PF_GIDS
+ (gids
% vfs
) +
597 ((gids
/ vfs
) * (slave_gid
- 1));
599 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix
);
601 static int mlx4_reset_roce_port_gids(struct mlx4_dev
*dev
, int slave
,
602 int port
, struct mlx4_cmd_mailbox
*mailbox
)
604 struct mlx4_roce_gid_entry
*gid_entry_mbox
;
605 struct mlx4_priv
*priv
= mlx4_priv(dev
);
606 int num_gids
, base
, offset
;
609 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
610 base
= mlx4_get_base_gid_ix(dev
, slave
, port
);
612 memset(mailbox
->buf
, 0, MLX4_MAILBOX_SIZE
);
614 mutex_lock(&(priv
->port
[port
].gid_table
.mutex
));
615 /* Zero-out gids belonging to that slave in the port GID table */
616 for (i
= 0, offset
= base
; i
< num_gids
; offset
++, i
++)
617 memcpy(priv
->port
[port
].gid_table
.roce_gids
[offset
].raw
,
618 zgid_entry
.raw
, MLX4_ROCE_GID_ENTRY_SIZE
);
620 /* Now, copy roce port gids table to mailbox for passing to FW */
621 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)mailbox
->buf
;
622 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; gid_entry_mbox
++, i
++)
623 memcpy(gid_entry_mbox
->raw
,
624 priv
->port
[port
].gid_table
.roce_gids
[i
].raw
,
625 MLX4_ROCE_GID_ENTRY_SIZE
);
627 err
= mlx4_cmd(dev
, mailbox
->dma
,
628 ((u32
)port
) | (MLX4_SET_PORT_GID_TABLE
<< 8), 1,
629 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
631 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
636 void mlx4_reset_roce_gids(struct mlx4_dev
*dev
, int slave
)
638 struct mlx4_active_ports actv_ports
;
639 struct mlx4_cmd_mailbox
*mailbox
;
640 int num_eth_ports
, err
;
643 if (slave
< 0 || slave
> dev
->persist
->num_vfs
)
646 actv_ports
= mlx4_get_active_ports(dev
, slave
);
648 for (i
= 0, num_eth_ports
= 0; i
< dev
->caps
.num_ports
; i
++) {
649 if (test_bit(i
, actv_ports
.ports
)) {
650 if (dev
->caps
.port_type
[i
+ 1] != MLX4_PORT_TYPE_ETH
)
659 /* have ETH ports. Alloc mailbox for SET_PORT command */
660 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
664 for (i
= 0; i
< dev
->caps
.num_ports
; i
++) {
665 if (test_bit(i
, actv_ports
.ports
)) {
666 if (dev
->caps
.port_type
[i
+ 1] != MLX4_PORT_TYPE_ETH
)
668 err
= mlx4_reset_roce_port_gids(dev
, slave
, i
+ 1, mailbox
);
670 mlx4_warn(dev
, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
675 mlx4_free_cmd_mailbox(dev
, mailbox
);
679 static int mlx4_common_set_port(struct mlx4_dev
*dev
, int slave
, u32 in_mod
,
680 u8 op_mod
, struct mlx4_cmd_mailbox
*inbox
)
682 struct mlx4_priv
*priv
= mlx4_priv(dev
);
683 struct mlx4_port_info
*port_info
;
684 struct mlx4_mfunc_master_ctx
*master
= &priv
->mfunc
.master
;
685 struct mlx4_slave_state
*slave_st
= &master
->slave_state
[slave
];
686 struct mlx4_set_port_rqp_calc_context
*qpn_context
;
687 struct mlx4_set_port_general_context
*gen_context
;
688 struct mlx4_roce_gid_entry
*gid_entry_tbl
, *gid_entry_mbox
, *gid_entry_mb1
;
689 int reset_qkey_viols
;
701 __be32 slave_cap_mask
;
704 port
= in_mod
& 0xff;
705 in_modifier
= in_mod
>> 8;
707 port_info
= &priv
->port
[port
];
709 /* Slaves cannot perform SET_PORT operations except changing MTU */
711 if (slave
!= dev
->caps
.function
&&
712 in_modifier
!= MLX4_SET_PORT_GENERAL
&&
713 in_modifier
!= MLX4_SET_PORT_GID_TABLE
) {
714 mlx4_warn(dev
, "denying SET_PORT for slave:%d\n",
718 switch (in_modifier
) {
719 case MLX4_SET_PORT_RQP_CALC
:
720 qpn_context
= inbox
->buf
;
721 qpn_context
->base_qpn
=
722 cpu_to_be32(port_info
->base_qpn
);
723 qpn_context
->n_mac
= 0x7;
724 promisc
= be32_to_cpu(qpn_context
->promisc
) >>
725 SET_PORT_PROMISC_SHIFT
;
726 qpn_context
->promisc
= cpu_to_be32(
727 promisc
<< SET_PORT_PROMISC_SHIFT
|
728 port_info
->base_qpn
);
729 promisc
= be32_to_cpu(qpn_context
->mcast
) >>
730 SET_PORT_MC_PROMISC_SHIFT
;
731 qpn_context
->mcast
= cpu_to_be32(
732 promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
733 port_info
->base_qpn
);
735 case MLX4_SET_PORT_GENERAL
:
736 gen_context
= inbox
->buf
;
737 /* Mtu is configured as the max MTU among all the
738 * the functions on the port. */
739 mtu
= be16_to_cpu(gen_context
->mtu
);
740 mtu
= min_t(int, mtu
, dev
->caps
.eth_mtu_cap
[port
] +
741 ETH_HLEN
+ VLAN_HLEN
+ ETH_FCS_LEN
);
742 prev_mtu
= slave_st
->mtu
[port
];
743 slave_st
->mtu
[port
] = mtu
;
744 if (mtu
> master
->max_mtu
[port
])
745 master
->max_mtu
[port
] = mtu
;
746 if (mtu
< prev_mtu
&& prev_mtu
==
747 master
->max_mtu
[port
]) {
748 slave_st
->mtu
[port
] = mtu
;
749 master
->max_mtu
[port
] = mtu
;
750 for (i
= 0; i
< dev
->num_slaves
; i
++) {
751 master
->max_mtu
[port
] =
752 max(master
->max_mtu
[port
],
753 master
->slave_state
[i
].mtu
[port
]);
757 gen_context
->mtu
= cpu_to_be16(master
->max_mtu
[port
]);
759 case MLX4_SET_PORT_GID_TABLE
:
760 /* change to MULTIPLE entries: number of guest's gids
761 * need a FOR-loop here over number of gids the guest has.
762 * 1. Check no duplicates in gids passed by slave
764 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
765 base
= mlx4_get_base_gid_ix(dev
, slave
, port
);
766 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
767 for (i
= 0; i
< num_gids
; gid_entry_mbox
++, i
++) {
768 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
771 gid_entry_mb1
= gid_entry_mbox
+ 1;
772 for (j
= i
+ 1; j
< num_gids
; gid_entry_mb1
++, j
++) {
773 if (!memcmp(gid_entry_mb1
->raw
,
774 zgid_entry
.raw
, sizeof(zgid_entry
)))
776 if (!memcmp(gid_entry_mb1
->raw
, gid_entry_mbox
->raw
,
777 sizeof(gid_entry_mbox
->raw
))) {
778 /* found duplicate */
784 /* 2. Check that do not have duplicates in OTHER
785 * entries in the port GID table
788 mutex_lock(&(priv
->port
[port
].gid_table
.mutex
));
789 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; i
++) {
790 if (i
>= base
&& i
< base
+ num_gids
)
791 continue; /* don't compare to slave's current gids */
792 gid_entry_tbl
= &priv
->port
[port
].gid_table
.roce_gids
[i
];
793 if (!memcmp(gid_entry_tbl
->raw
, zgid_entry
.raw
, sizeof(zgid_entry
)))
795 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
796 for (j
= 0; j
< num_gids
; gid_entry_mbox
++, j
++) {
797 if (!memcmp(gid_entry_mbox
->raw
, zgid_entry
.raw
,
800 if (!memcmp(gid_entry_mbox
->raw
, gid_entry_tbl
->raw
,
801 sizeof(gid_entry_tbl
->raw
))) {
802 /* found duplicate */
803 mlx4_warn(dev
, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
805 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
811 /* insert slave GIDs with memcpy, starting at slave's base index */
812 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
813 for (i
= 0, offset
= base
; i
< num_gids
; gid_entry_mbox
++, offset
++, i
++)
814 memcpy(priv
->port
[port
].gid_table
.roce_gids
[offset
].raw
,
815 gid_entry_mbox
->raw
, MLX4_ROCE_GID_ENTRY_SIZE
);
817 /* Now, copy roce port gids table to current mailbox for passing to FW */
818 gid_entry_mbox
= (struct mlx4_roce_gid_entry
*)(inbox
->buf
);
819 for (i
= 0; i
< MLX4_ROCE_MAX_GIDS
; gid_entry_mbox
++, i
++)
820 memcpy(gid_entry_mbox
->raw
,
821 priv
->port
[port
].gid_table
.roce_gids
[i
].raw
,
822 MLX4_ROCE_GID_ENTRY_SIZE
);
824 err
= mlx4_cmd(dev
, inbox
->dma
, in_mod
& 0xffff, op_mod
,
825 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
827 mutex_unlock(&(priv
->port
[port
].gid_table
.mutex
));
831 return mlx4_cmd(dev
, inbox
->dma
, in_mod
& 0xffff, op_mod
,
832 MLX4_CMD_SET_PORT
, MLX4_CMD_TIME_CLASS_B
,
836 /* For IB, we only consider:
837 * - The capability mask, which is set to the aggregate of all
838 * slave function capabilities
839 * - The QKey violatin counter - reset according to each request.
842 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
843 reset_qkey_viols
= (*(u8
*) inbox
->buf
) & 0x40;
844 new_cap_mask
= ((__be32
*) inbox
->buf
)[2];
846 reset_qkey_viols
= ((u8
*) inbox
->buf
)[3] & 0x1;
847 new_cap_mask
= ((__be32
*) inbox
->buf
)[1];
850 /* slave may not set the IS_SM capability for the port */
851 if (slave
!= mlx4_master_func_num(dev
) &&
852 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_IS_SM
))
855 /* No DEV_MGMT in multifunc mode */
856 if (mlx4_is_mfunc(dev
) &&
857 (be32_to_cpu(new_cap_mask
) & MLX4_PORT_CAP_DEV_MGMT_SUP
))
862 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
];
863 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] = new_cap_mask
;
864 for (i
= 0; i
< dev
->num_slaves
; i
++)
866 priv
->mfunc
.master
.slave_state
[i
].ib_cap_mask
[port
];
868 /* only clear mailbox for guests. Master may be setting
869 * MTU or PKEY table size
871 if (slave
!= dev
->caps
.function
)
872 memset(inbox
->buf
, 0, 256);
873 if (dev
->flags
& MLX4_FLAG_OLD_PORT_CMDS
) {
874 *(u8
*) inbox
->buf
|= !!reset_qkey_viols
<< 6;
875 ((__be32
*) inbox
->buf
)[2] = agg_cap_mask
;
877 ((u8
*) inbox
->buf
)[3] |= !!reset_qkey_viols
;
878 ((__be32
*) inbox
->buf
)[1] = agg_cap_mask
;
881 err
= mlx4_cmd(dev
, inbox
->dma
, port
, is_eth
, MLX4_CMD_SET_PORT
,
882 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
884 priv
->mfunc
.master
.slave_state
[slave
].ib_cap_mask
[port
] =
889 int mlx4_SET_PORT_wrapper(struct mlx4_dev
*dev
, int slave
,
890 struct mlx4_vhcr
*vhcr
,
891 struct mlx4_cmd_mailbox
*inbox
,
892 struct mlx4_cmd_mailbox
*outbox
,
893 struct mlx4_cmd_info
*cmd
)
895 int port
= mlx4_slave_convert_port(
896 dev
, slave
, vhcr
->in_modifier
& 0xFF);
901 vhcr
->in_modifier
= (vhcr
->in_modifier
& ~0xFF) |
904 return mlx4_common_set_port(dev
, slave
, vhcr
->in_modifier
,
905 vhcr
->op_modifier
, inbox
);
908 /* bit locations for set port command with zero op modifier */
910 MLX4_SET_PORT_VL_CAP
= 4, /* bits 7:4 */
911 MLX4_SET_PORT_MTU_CAP
= 12, /* bits 15:12 */
912 MLX4_CHANGE_PORT_PKEY_TBL_SZ
= 20,
913 MLX4_CHANGE_PORT_VL_CAP
= 21,
914 MLX4_CHANGE_PORT_MTU_CAP
= 22,
917 int mlx4_SET_PORT(struct mlx4_dev
*dev
, u8 port
, int pkey_tbl_sz
)
919 struct mlx4_cmd_mailbox
*mailbox
;
920 int err
, vl_cap
, pkey_tbl_flag
= 0;
922 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
925 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
927 return PTR_ERR(mailbox
);
929 ((__be32
*) mailbox
->buf
)[1] = dev
->caps
.ib_port_def_cap
[port
];
931 if (pkey_tbl_sz
>= 0 && mlx4_is_master(dev
)) {
933 ((__be16
*) mailbox
->buf
)[20] = cpu_to_be16(pkey_tbl_sz
);
936 /* IB VL CAP enum isn't used by the firmware, just numerical values */
937 for (vl_cap
= 8; vl_cap
>= 1; vl_cap
>>= 1) {
938 ((__be32
*) mailbox
->buf
)[0] = cpu_to_be32(
939 (1 << MLX4_CHANGE_PORT_MTU_CAP
) |
940 (1 << MLX4_CHANGE_PORT_VL_CAP
) |
941 (pkey_tbl_flag
<< MLX4_CHANGE_PORT_PKEY_TBL_SZ
) |
942 (dev
->caps
.port_ib_mtu
[port
] << MLX4_SET_PORT_MTU_CAP
) |
943 (vl_cap
<< MLX4_SET_PORT_VL_CAP
));
944 err
= mlx4_cmd(dev
, mailbox
->dma
, port
, 0, MLX4_CMD_SET_PORT
,
945 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
950 mlx4_free_cmd_mailbox(dev
, mailbox
);
954 int mlx4_SET_PORT_general(struct mlx4_dev
*dev
, u8 port
, int mtu
,
955 u8 pptx
, u8 pfctx
, u8 pprx
, u8 pfcrx
)
957 struct mlx4_cmd_mailbox
*mailbox
;
958 struct mlx4_set_port_general_context
*context
;
962 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
964 return PTR_ERR(mailbox
);
965 context
= mailbox
->buf
;
966 context
->flags
= SET_PORT_GEN_ALL_VALID
;
967 context
->mtu
= cpu_to_be16(mtu
);
968 context
->pptx
= (pptx
* (!pfctx
)) << 7;
969 context
->pfctx
= pfctx
;
970 context
->pprx
= (pprx
* (!pfcrx
)) << 7;
971 context
->pfcrx
= pfcrx
;
973 in_mod
= MLX4_SET_PORT_GENERAL
<< 8 | port
;
974 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
975 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
977 mlx4_free_cmd_mailbox(dev
, mailbox
);
980 EXPORT_SYMBOL(mlx4_SET_PORT_general
);
982 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev
*dev
, u8 port
, u32 base_qpn
,
985 struct mlx4_cmd_mailbox
*mailbox
;
986 struct mlx4_set_port_rqp_calc_context
*context
;
989 u32 m_promisc
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
) ?
990 MCAST_DIRECT
: MCAST_DEFAULT
;
992 if (dev
->caps
.steering_mode
!= MLX4_STEERING_MODE_A0
)
995 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
997 return PTR_ERR(mailbox
);
998 context
= mailbox
->buf
;
999 context
->base_qpn
= cpu_to_be32(base_qpn
);
1000 context
->n_mac
= dev
->caps
.log_num_macs
;
1001 context
->promisc
= cpu_to_be32(promisc
<< SET_PORT_PROMISC_SHIFT
|
1003 context
->mcast
= cpu_to_be32(m_promisc
<< SET_PORT_MC_PROMISC_SHIFT
|
1005 context
->intra_no_vlan
= 0;
1006 context
->no_vlan
= MLX4_NO_VLAN_IDX
;
1007 context
->intra_vlan_miss
= 0;
1008 context
->vlan_miss
= MLX4_VLAN_MISS_IDX
;
1010 in_mod
= MLX4_SET_PORT_RQP_CALC
<< 8 | port
;
1011 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1012 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_WRAPPED
);
1014 mlx4_free_cmd_mailbox(dev
, mailbox
);
1017 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc
);
1019 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev
*dev
, u8 port
, u8
*prio2tc
)
1021 struct mlx4_cmd_mailbox
*mailbox
;
1022 struct mlx4_set_port_prio2tc_context
*context
;
1027 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1028 if (IS_ERR(mailbox
))
1029 return PTR_ERR(mailbox
);
1030 context
= mailbox
->buf
;
1031 for (i
= 0; i
< MLX4_NUM_UP
; i
+= 2)
1032 context
->prio2tc
[i
>> 1] = prio2tc
[i
] << 4 | prio2tc
[i
+ 1];
1034 in_mod
= MLX4_SET_PORT_PRIO2TC
<< 8 | port
;
1035 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1036 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
1038 mlx4_free_cmd_mailbox(dev
, mailbox
);
1041 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC
);
1043 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev
*dev
, u8 port
, u8
*tc_tx_bw
,
1044 u8
*pg
, u16
*ratelimit
)
1046 struct mlx4_cmd_mailbox
*mailbox
;
1047 struct mlx4_set_port_scheduler_context
*context
;
1052 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1053 if (IS_ERR(mailbox
))
1054 return PTR_ERR(mailbox
);
1055 context
= mailbox
->buf
;
1057 for (i
= 0; i
< MLX4_NUM_TC
; i
++) {
1058 struct mlx4_port_scheduler_tc_cfg_be
*tc
= &context
->tc
[i
];
1061 if (ratelimit
&& ratelimit
[i
]) {
1062 if (ratelimit
[i
] <= MLX4_MAX_100M_UNITS_VAL
) {
1065 htons(MLX4_RATELIMIT_100M_UNITS
);
1067 r
= ratelimit
[i
]/10;
1069 htons(MLX4_RATELIMIT_1G_UNITS
);
1071 tc
->max_bw_value
= htons(r
);
1073 tc
->max_bw_value
= htons(MLX4_RATELIMIT_DEFAULT
);
1074 tc
->max_bw_units
= htons(MLX4_RATELIMIT_1G_UNITS
);
1077 tc
->pg
= htons(pg
[i
]);
1078 tc
->bw_precentage
= htons(tc_tx_bw
[i
]);
1081 in_mod
= MLX4_SET_PORT_SCHEDULER
<< 8 | port
;
1082 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1083 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
1085 mlx4_free_cmd_mailbox(dev
, mailbox
);
1088 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER
);
1091 VXLAN_ENABLE_MODIFY
= 1 << 7,
1092 VXLAN_STEERING_MODIFY
= 1 << 6,
1094 VXLAN_ENABLE
= 1 << 7,
1097 struct mlx4_set_port_vxlan_context
{
1105 int mlx4_SET_PORT_VXLAN(struct mlx4_dev
*dev
, u8 port
, u8 steering
, int enable
)
1109 struct mlx4_cmd_mailbox
*mailbox
;
1110 struct mlx4_set_port_vxlan_context
*context
;
1112 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1113 if (IS_ERR(mailbox
))
1114 return PTR_ERR(mailbox
);
1115 context
= mailbox
->buf
;
1116 memset(context
, 0, sizeof(*context
));
1118 context
->modify_flags
= VXLAN_ENABLE_MODIFY
| VXLAN_STEERING_MODIFY
;
1120 context
->enable_flags
= VXLAN_ENABLE
;
1121 context
->steering
= steering
;
1123 in_mod
= MLX4_SET_PORT_VXLAN
<< 8 | port
;
1124 err
= mlx4_cmd(dev
, mailbox
->dma
, in_mod
, 1, MLX4_CMD_SET_PORT
,
1125 MLX4_CMD_TIME_CLASS_B
, MLX4_CMD_NATIVE
);
1127 mlx4_free_cmd_mailbox(dev
, mailbox
);
1130 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN
);
1132 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev
*dev
, int slave
,
1133 struct mlx4_vhcr
*vhcr
,
1134 struct mlx4_cmd_mailbox
*inbox
,
1135 struct mlx4_cmd_mailbox
*outbox
,
1136 struct mlx4_cmd_info
*cmd
)
1143 int mlx4_SET_MCAST_FLTR(struct mlx4_dev
*dev
, u8 port
,
1144 u64 mac
, u64 clear
, u8 mode
)
1146 return mlx4_cmd(dev
, (mac
| (clear
<< 63)), port
, mode
,
1147 MLX4_CMD_SET_MCAST_FLTR
, MLX4_CMD_TIME_CLASS_B
,
1150 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR
);
1152 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev
*dev
, int slave
,
1153 struct mlx4_vhcr
*vhcr
,
1154 struct mlx4_cmd_mailbox
*inbox
,
1155 struct mlx4_cmd_mailbox
*outbox
,
1156 struct mlx4_cmd_info
*cmd
)
1163 int mlx4_common_dump_eth_stats(struct mlx4_dev
*dev
, int slave
,
1164 u32 in_mod
, struct mlx4_cmd_mailbox
*outbox
)
1166 return mlx4_cmd_box(dev
, 0, outbox
->dma
, in_mod
, 0,
1167 MLX4_CMD_DUMP_ETH_STATS
, MLX4_CMD_TIME_CLASS_B
,
1171 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev
*dev
, int slave
,
1172 struct mlx4_vhcr
*vhcr
,
1173 struct mlx4_cmd_mailbox
*inbox
,
1174 struct mlx4_cmd_mailbox
*outbox
,
1175 struct mlx4_cmd_info
*cmd
)
1177 if (slave
!= dev
->caps
.function
)
1179 return mlx4_common_dump_eth_stats(dev
, slave
,
1180 vhcr
->in_modifier
, outbox
);
/* mlx4_get_slave_from_roce_gid - map a RoCE GID back to its owning slave.
 * @dev:      mlx4 device (must be multi-function).
 * @port:     physical port whose GID table is searched.
 * @gid:      raw GID (MLX4_ROCE_GID_ENTRY_SIZE bytes) to look up.
 * @slave_id: out: globally unique slave number owning the GID.
 *
 * Searches the port's GID table for @gid, converts the matching table
 * index first into a per-port slave number and then into a globally
 * unique slave id by accounting for VFs assigned to other ports.
 * Returns 0 on success, -EINVAL if not multi-function or GID not found.
 */
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
				 int *slave_id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, found_ix = -1;
	/* GIDs left for VFs after the PF takes its fixed share */
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	struct mlx4_slaves_pport slaves_pport;
	unsigned num_vfs;
	int slave_gid = 0;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	/* Number of VFs active on this port (bitmap includes the PF, hence -1) */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	num_vfs = bitmap_weight(slaves_pport.slaves,
				dev->persist->num_vfs + 1) - 1;

	/* Linear scan of the port's GID table for a byte-exact match */
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
			    MLX4_ROCE_GID_ENTRY_SIZE)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		/* Calculate a slave_gid which is the slave number in the gid
		 * table and not a globally unique slave number.
		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;	/* PF owns the first GIDs */
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
			 (vf_gids / num_vfs + 1))
			/* first (vf_gids % num_vfs) VFs get one extra GID each */
			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / num_vfs + 1)) + 1;
		else
			/* remaining VFs get the smaller, even share */
			slave_gid =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
			int num_vfs_before = 0;
			int candidate_slave_gid;

			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}

			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On dual port VF,
			 * slave_gid = [single port VFs on port <port>] +
			 * [offset of the current slave from the first dual port VF] +
			 * 1 (for the PF).
			 */
			candidate_slave_gid = slave_gid + num_vfs_before;

			actv_ports = mlx4_get_active_ports(dev,
							   candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
						dev, &exclusive_ports);
				slave_gid += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}
		}
		*slave_id = slave_gid;
	}

	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
/* mlx4_get_roce_gid_from_slave - fetch the RoCE GID owned by a slave.
 * @dev:      mlx4 device; must be the master (the GID table lives there).
 * @port:     physical port whose GID table is read.
 * @slave_id: index into the port's GID table.
 * @gid:      out: MLX4_ROCE_GID_ENTRY_SIZE raw bytes.
 *
 * Inverse of mlx4_get_slave_from_roce_gid().  Returns 0 on success,
 * -EINVAL when called on a non-master function.
 */
int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
				 u8 *gid)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev))
		return -EINVAL;

	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
	       MLX4_ROCE_GID_ENTRY_SIZE);
	return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48	/* max EEPROM bytes per single MAD read */

#define I2C_ADDR_LOW 0x50	/* i2c address of the low EEPROM page */
#define I2C_ADDR_HIGH 0x51	/* i2c address of the high EEPROM page */
#define I2C_PAGE_SIZE 256	/* bytes per EEPROM page */
/* Module Info Data
 * Request/response layout carried in the MAD data area for the
 * cable-info (0xFF60) attribute; field order mirrors the firmware
 * interface, so do not reorder.
 */
struct mlx4_cable_info {
	u8	i2c_addr;		/* I2C_ADDR_LOW or I2C_ADDR_HIGH */
	u8	page_num;		/* EEPROM page number (0 is used here) */
	__be16	dev_mem_address;	/* byte offset within the page */
	__be16	reserved1;
	__be16	size;			/* number of bytes to read */
	__be32	reserved2[2];
	u8	data[MODULE_INFO_MAX_READ];	/* returned EEPROM bytes */
};
/* Error codes the firmware returns in the upper byte of the MAD status
 * word for a cable-info query; decoded by cable_info_mad_err_str().
 */
enum cable_info_err {
	CABLE_INF_INV_PORT = 0x1,
	CABLE_INF_OP_NOSUP = 0x2,
	CABLE_INF_NOT_CONN = 0x3,
	CABLE_INF_NO_EEPRM = 0x4,
	CABLE_INF_PAGE_ERR = 0x5,
	CABLE_INF_INV_ADDR = 0x6,
	CABLE_INF_I2C_ADDR = 0x7,
	CABLE_INF_QSFP_VIO = 0x8,
	CABLE_INF_I2C_BUSY = 0x9,
};
/* Extract the cable-info error code (enum cable_info_err) from the upper
 * byte of a MAD response status word.  The argument is parenthesized in
 * the expansion so expression arguments (e.g. a conditional) bind
 * correctly — the previous form expanded "mad_status" unparenthesized.
 */
#define MAD_STATUS_2_CABLE_ERR(mad_status) (((mad_status) >> 8) & 0xFF)
1328 static inline const char *cable_info_mad_err_str(u16 mad_status
)
1330 u8 err
= MAD_STATUS_2_CABLE_ERR(mad_status
);
1333 case CABLE_INF_INV_PORT
:
1334 return "invalid port selected";
1335 case CABLE_INF_OP_NOSUP
:
1336 return "operation not supported for this port (the port is of type CX4 or internal)";
1337 case CABLE_INF_NOT_CONN
:
1338 return "cable is not connected";
1339 case CABLE_INF_NO_EEPRM
:
1340 return "the connected cable has no EPROM (passive copper cable)";
1341 case CABLE_INF_PAGE_ERR
:
1342 return "page number is greater than 15";
1343 case CABLE_INF_INV_ADDR
:
1344 return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
1345 case CABLE_INF_I2C_ADDR
:
1346 return "invalid I2C slave address";
1347 case CABLE_INF_QSFP_VIO
:
1348 return "at least one cable violates the QSFP specification and ignores the modsel signal";
1349 case CABLE_INF_I2C_BUSY
:
1350 return "I2C bus is constantly busy";
1352 return "Unknown Error";
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer parameter.
 * Returns num of read bytes on success or a negative error
 * code.
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* Clamp the request to what fits in one MAD response */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a SMP Get of the vendor cable-info attribute */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	/* Both mailboxes are freed on every exit path */
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);