2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/kernel.h>
35 #include <linux/ethtool.h>
36 #include <linux/netdevice.h>
37 #include <linux/mlx4/driver.h>
38 #include <linux/mlx4/device.h>
45 #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
46 #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
47 #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
49 static int mlx4_en_moderation_update(struct mlx4_en_priv
*priv
)
54 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
55 priv
->tx_cq
[i
]->moder_cnt
= priv
->tx_frames
;
56 priv
->tx_cq
[i
]->moder_time
= priv
->tx_usecs
;
58 err
= mlx4_en_set_cq_moder(priv
, priv
->tx_cq
[i
]);
64 if (priv
->adaptive_rx_coal
)
67 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
68 priv
->rx_cq
[i
]->moder_cnt
= priv
->rx_frames
;
69 priv
->rx_cq
[i
]->moder_time
= priv
->rx_usecs
;
70 priv
->last_moder_time
[i
] = MLX4_EN_AUTO_CONF
;
72 err
= mlx4_en_set_cq_moder(priv
, priv
->rx_cq
[i
]);
82 mlx4_en_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*drvinfo
)
84 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
85 struct mlx4_en_dev
*mdev
= priv
->mdev
;
87 strlcpy(drvinfo
->driver
, DRV_NAME
, sizeof(drvinfo
->driver
));
88 strlcpy(drvinfo
->version
, DRV_VERSION
" (" DRV_RELDATE
")",
89 sizeof(drvinfo
->version
));
90 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
92 (u16
) (mdev
->dev
->caps
.fw_ver
>> 32),
93 (u16
) ((mdev
->dev
->caps
.fw_ver
>> 16) & 0xffff),
94 (u16
) (mdev
->dev
->caps
.fw_ver
& 0xffff));
95 strlcpy(drvinfo
->bus_info
, pci_name(mdev
->dev
->pdev
),
96 sizeof(drvinfo
->bus_info
));
98 drvinfo
->regdump_len
= 0;
99 drvinfo
->eedump_len
= 0;
102 static const char mlx4_en_priv_flags
[][ETH_GSTRING_LEN
] = {
106 static const char main_strings
[][ETH_GSTRING_LEN
] = {
107 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
108 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
109 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
110 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
111 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
112 "tx_heartbeat_errors", "tx_window_errors",
114 /* port statistics */
117 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
118 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
120 /* packet statistics */
121 "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
122 "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
123 "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
124 "tx_prio_6", "tx_prio_7",
126 #define NUM_MAIN_STATS 21
127 #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
129 static const char mlx4_en_test_names
[][ETH_GSTRING_LEN
]= {
137 static u32
mlx4_en_get_msglevel(struct net_device
*dev
)
139 return ((struct mlx4_en_priv
*) netdev_priv(dev
))->msg_enable
;
142 static void mlx4_en_set_msglevel(struct net_device
*dev
, u32 val
)
144 ((struct mlx4_en_priv
*) netdev_priv(dev
))->msg_enable
= val
;
147 static void mlx4_en_get_wol(struct net_device
*netdev
,
148 struct ethtool_wolinfo
*wol
)
150 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
155 if ((priv
->port
< 1) || (priv
->port
> 2)) {
156 en_err(priv
, "Failed to get WoL information\n");
160 mask
= (priv
->port
== 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1
:
161 MLX4_DEV_CAP_FLAG_WOL_PORT2
;
163 if (!(priv
->mdev
->dev
->caps
.flags
& mask
)) {
169 err
= mlx4_wol_read(priv
->mdev
->dev
, &config
, priv
->port
);
171 en_err(priv
, "Failed to get WoL information\n");
175 if (config
& MLX4_EN_WOL_MAGIC
)
176 wol
->supported
= WAKE_MAGIC
;
180 if (config
& MLX4_EN_WOL_ENABLED
)
181 wol
->wolopts
= WAKE_MAGIC
;
186 static int mlx4_en_set_wol(struct net_device
*netdev
,
187 struct ethtool_wolinfo
*wol
)
189 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
194 if ((priv
->port
< 1) || (priv
->port
> 2))
197 mask
= (priv
->port
== 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1
:
198 MLX4_DEV_CAP_FLAG_WOL_PORT2
;
200 if (!(priv
->mdev
->dev
->caps
.flags
& mask
))
203 if (wol
->supported
& ~WAKE_MAGIC
)
206 err
= mlx4_wol_read(priv
->mdev
->dev
, &config
, priv
->port
);
208 en_err(priv
, "Failed to get WoL info, unable to modify\n");
212 if (wol
->wolopts
& WAKE_MAGIC
) {
213 config
|= MLX4_EN_WOL_DO_MODIFY
| MLX4_EN_WOL_ENABLED
|
216 config
&= ~(MLX4_EN_WOL_ENABLED
| MLX4_EN_WOL_MAGIC
);
217 config
|= MLX4_EN_WOL_DO_MODIFY
;
220 err
= mlx4_wol_write(priv
->mdev
->dev
, config
, priv
->port
);
222 en_err(priv
, "Failed to set WoL information\n");
227 static int mlx4_en_get_sset_count(struct net_device
*dev
, int sset
)
229 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
230 int bit_count
= hweight64(priv
->stats_bitmap
);
234 return (priv
->stats_bitmap
? bit_count
: NUM_ALL_STATS
) +
235 (priv
->tx_ring_num
* 2) +
236 #ifdef CONFIG_NET_RX_BUSY_POLL
237 (priv
->rx_ring_num
* 5);
239 (priv
->rx_ring_num
* 2);
242 return MLX4_EN_NUM_SELF_TEST
- !(priv
->mdev
->dev
->caps
.flags
243 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK
) * 2;
244 case ETH_SS_PRIV_FLAGS
:
245 return ARRAY_SIZE(mlx4_en_priv_flags
);
251 static void mlx4_en_get_ethtool_stats(struct net_device
*dev
,
252 struct ethtool_stats
*stats
, uint64_t *data
)
254 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
258 spin_lock_bh(&priv
->stats_lock
);
260 if (!(priv
->stats_bitmap
)) {
261 for (i
= 0; i
< NUM_MAIN_STATS
; i
++)
263 ((unsigned long *) &priv
->stats
)[i
];
264 for (i
= 0; i
< NUM_PORT_STATS
; i
++)
266 ((unsigned long *) &priv
->port_stats
)[i
];
267 for (i
= 0; i
< NUM_PKT_STATS
; i
++)
269 ((unsigned long *) &priv
->pkstats
)[i
];
271 for (i
= 0; i
< NUM_MAIN_STATS
; i
++) {
272 if ((priv
->stats_bitmap
>> j
) & 1)
274 ((unsigned long *) &priv
->stats
)[i
];
277 for (i
= 0; i
< NUM_PORT_STATS
; i
++) {
278 if ((priv
->stats_bitmap
>> j
) & 1)
280 ((unsigned long *) &priv
->port_stats
)[i
];
284 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
285 data
[index
++] = priv
->tx_ring
[i
]->packets
;
286 data
[index
++] = priv
->tx_ring
[i
]->bytes
;
288 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
289 data
[index
++] = priv
->rx_ring
[i
]->packets
;
290 data
[index
++] = priv
->rx_ring
[i
]->bytes
;
291 #ifdef CONFIG_NET_RX_BUSY_POLL
292 data
[index
++] = priv
->rx_ring
[i
]->yields
;
293 data
[index
++] = priv
->rx_ring
[i
]->misses
;
294 data
[index
++] = priv
->rx_ring
[i
]->cleaned
;
297 spin_unlock_bh(&priv
->stats_lock
);
301 static void mlx4_en_self_test(struct net_device
*dev
,
302 struct ethtool_test
*etest
, u64
*buf
)
304 mlx4_en_ex_selftest(dev
, &etest
->flags
, buf
);
307 static void mlx4_en_get_strings(struct net_device
*dev
,
308 uint32_t stringset
, uint8_t *data
)
310 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
316 for (i
= 0; i
< MLX4_EN_NUM_SELF_TEST
- 2; i
++)
317 strcpy(data
+ i
* ETH_GSTRING_LEN
, mlx4_en_test_names
[i
]);
318 if (priv
->mdev
->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK
)
319 for (; i
< MLX4_EN_NUM_SELF_TEST
; i
++)
320 strcpy(data
+ i
* ETH_GSTRING_LEN
, mlx4_en_test_names
[i
]);
324 /* Add main counters */
325 if (!priv
->stats_bitmap
) {
326 for (i
= 0; i
< NUM_MAIN_STATS
; i
++)
327 strcpy(data
+ (index
++) * ETH_GSTRING_LEN
,
329 for (i
= 0; i
< NUM_PORT_STATS
; i
++)
330 strcpy(data
+ (index
++) * ETH_GSTRING_LEN
,
333 for (i
= 0; i
< NUM_PKT_STATS
; i
++)
334 strcpy(data
+ (index
++) * ETH_GSTRING_LEN
,
339 for (i
= 0; i
< NUM_MAIN_STATS
+ NUM_PORT_STATS
; i
++) {
340 if ((priv
->stats_bitmap
>> i
) & 1) {
342 (index
++) * ETH_GSTRING_LEN
,
345 if (!(priv
->stats_bitmap
>> i
))
348 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
349 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
351 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
354 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
355 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
357 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
359 #ifdef CONFIG_NET_RX_BUSY_POLL
360 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
361 "rx%d_napi_yield", i
);
362 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
364 sprintf(data
+ (index
++) * ETH_GSTRING_LEN
,
369 case ETH_SS_PRIV_FLAGS
:
370 for (i
= 0; i
< ARRAY_SIZE(mlx4_en_priv_flags
); i
++)
371 strcpy(data
+ i
* ETH_GSTRING_LEN
,
372 mlx4_en_priv_flags
[i
]);
378 static u32
mlx4_en_autoneg_get(struct net_device
*dev
)
380 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
381 struct mlx4_en_dev
*mdev
= priv
->mdev
;
382 u32 autoneg
= AUTONEG_DISABLE
;
384 if ((mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP
) &&
385 (priv
->port_state
.flags
& MLX4_EN_PORT_ANE
))
386 autoneg
= AUTONEG_ENABLE
;
391 static u32
ptys_get_supported_port(struct mlx4_ptys_reg
*ptys_reg
)
393 u32 eth_proto
= be32_to_cpu(ptys_reg
->eth_proto_cap
);
395 if (eth_proto
& (MLX4_PROT_MASK(MLX4_10GBASE_T
)
396 | MLX4_PROT_MASK(MLX4_1000BASE_T
)
397 | MLX4_PROT_MASK(MLX4_100BASE_TX
))) {
401 if (eth_proto
& (MLX4_PROT_MASK(MLX4_10GBASE_CR
)
402 | MLX4_PROT_MASK(MLX4_10GBASE_SR
)
403 | MLX4_PROT_MASK(MLX4_56GBASE_SR4
)
404 | MLX4_PROT_MASK(MLX4_40GBASE_CR4
)
405 | MLX4_PROT_MASK(MLX4_40GBASE_SR4
)
406 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII
))) {
407 return SUPPORTED_FIBRE
;
410 if (eth_proto
& (MLX4_PROT_MASK(MLX4_56GBASE_KR4
)
411 | MLX4_PROT_MASK(MLX4_40GBASE_KR4
)
412 | MLX4_PROT_MASK(MLX4_20GBASE_KR2
)
413 | MLX4_PROT_MASK(MLX4_10GBASE_KR
)
414 | MLX4_PROT_MASK(MLX4_10GBASE_KX4
)
415 | MLX4_PROT_MASK(MLX4_1000BASE_KX
))) {
416 return SUPPORTED_Backplane
;
421 static u32
ptys_get_active_port(struct mlx4_ptys_reg
*ptys_reg
)
423 u32 eth_proto
= be32_to_cpu(ptys_reg
->eth_proto_oper
);
425 if (!eth_proto
) /* link down */
426 eth_proto
= be32_to_cpu(ptys_reg
->eth_proto_cap
);
428 if (eth_proto
& (MLX4_PROT_MASK(MLX4_10GBASE_T
)
429 | MLX4_PROT_MASK(MLX4_1000BASE_T
)
430 | MLX4_PROT_MASK(MLX4_100BASE_TX
))) {
434 if (eth_proto
& (MLX4_PROT_MASK(MLX4_10GBASE_SR
)
435 | MLX4_PROT_MASK(MLX4_56GBASE_SR4
)
436 | MLX4_PROT_MASK(MLX4_40GBASE_SR4
)
437 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII
))) {
441 if (eth_proto
& (MLX4_PROT_MASK(MLX4_10GBASE_CR
)
442 | MLX4_PROT_MASK(MLX4_56GBASE_CR4
)
443 | MLX4_PROT_MASK(MLX4_40GBASE_CR4
))) {
447 if (eth_proto
& (MLX4_PROT_MASK(MLX4_56GBASE_KR4
)
448 | MLX4_PROT_MASK(MLX4_40GBASE_KR4
)
449 | MLX4_PROT_MASK(MLX4_20GBASE_KR2
)
450 | MLX4_PROT_MASK(MLX4_10GBASE_KR
)
451 | MLX4_PROT_MASK(MLX4_10GBASE_KX4
)
452 | MLX4_PROT_MASK(MLX4_1000BASE_KX
))) {
/* Number of distinct PTYS link modes = bit width of eth_proto_cap. */
#define MLX4_LINK_MODES_SZ \
    (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Column selector into ptys2ethtool_map[][3].
 * NOTE(review): enumerator list lost in this extraction; restored from
 * upstream -- confirm against the table below.
 */
enum ethtool_report {
    SUPPORTED = 0,
    ADVERTISED = 1,
    SPEED = 2
};
467 /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
468 static u32 ptys2ethtool_map
[MLX4_LINK_MODES_SZ
][3] = {
469 [MLX4_100BASE_TX
] = {
470 SUPPORTED_100baseT_Full
,
471 ADVERTISED_100baseT_Full
,
475 [MLX4_1000BASE_T
] = {
476 SUPPORTED_1000baseT_Full
,
477 ADVERTISED_1000baseT_Full
,
480 [MLX4_1000BASE_CX_SGMII
] = {
481 SUPPORTED_1000baseKX_Full
,
482 ADVERTISED_1000baseKX_Full
,
485 [MLX4_1000BASE_KX
] = {
486 SUPPORTED_1000baseKX_Full
,
487 ADVERTISED_1000baseKX_Full
,
492 SUPPORTED_10000baseT_Full
,
493 ADVERTISED_10000baseT_Full
,
496 [MLX4_10GBASE_CX4
] = {
497 SUPPORTED_10000baseKX4_Full
,
498 ADVERTISED_10000baseKX4_Full
,
501 [MLX4_10GBASE_KX4
] = {
502 SUPPORTED_10000baseKX4_Full
,
503 ADVERTISED_10000baseKX4_Full
,
506 [MLX4_10GBASE_KR
] = {
507 SUPPORTED_10000baseKR_Full
,
508 ADVERTISED_10000baseKR_Full
,
511 [MLX4_10GBASE_CR
] = {
512 SUPPORTED_10000baseKR_Full
,
513 ADVERTISED_10000baseKR_Full
,
516 [MLX4_10GBASE_SR
] = {
517 SUPPORTED_10000baseKR_Full
,
518 ADVERTISED_10000baseKR_Full
,
522 [MLX4_20GBASE_KR2
] = {
523 SUPPORTED_20000baseMLD2_Full
| SUPPORTED_20000baseKR2_Full
,
524 ADVERTISED_20000baseMLD2_Full
| ADVERTISED_20000baseKR2_Full
,
528 [MLX4_40GBASE_CR4
] = {
529 SUPPORTED_40000baseCR4_Full
,
530 ADVERTISED_40000baseCR4_Full
,
533 [MLX4_40GBASE_KR4
] = {
534 SUPPORTED_40000baseKR4_Full
,
535 ADVERTISED_40000baseKR4_Full
,
538 [MLX4_40GBASE_SR4
] = {
539 SUPPORTED_40000baseSR4_Full
,
540 ADVERTISED_40000baseSR4_Full
,
544 [MLX4_56GBASE_KR4
] = {
545 SUPPORTED_56000baseKR4_Full
,
546 ADVERTISED_56000baseKR4_Full
,
549 [MLX4_56GBASE_CR4
] = {
550 SUPPORTED_56000baseCR4_Full
,
551 ADVERTISED_56000baseCR4_Full
,
554 [MLX4_56GBASE_SR4
] = {
555 SUPPORTED_56000baseSR4_Full
,
556 ADVERTISED_56000baseSR4_Full
,
561 static u32
ptys2ethtool_link_modes(u32 eth_proto
, enum ethtool_report report
)
566 for (i
= 0; i
< MLX4_LINK_MODES_SZ
; i
++) {
567 if (eth_proto
& MLX4_PROT_MASK(i
))
568 link_modes
|= ptys2ethtool_map
[i
][report
];
573 static u32
ethtool2ptys_link_modes(u32 link_modes
, enum ethtool_report report
)
578 for (i
= 0; i
< MLX4_LINK_MODES_SZ
; i
++) {
579 if (ptys2ethtool_map
[i
][report
] & link_modes
)
580 ptys_modes
|= 1 << i
;
585 /* Convert actual speed (SPEED_XXX) to ptys link modes */
586 static u32
speed2ptys_link_modes(u32 speed
)
591 for (i
= 0; i
< MLX4_LINK_MODES_SZ
; i
++) {
592 if (ptys2ethtool_map
[i
][SPEED
] == speed
)
593 ptys_modes
|= 1 << i
;
598 static int ethtool_get_ptys_settings(struct net_device
*dev
,
599 struct ethtool_cmd
*cmd
)
601 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
602 struct mlx4_ptys_reg ptys_reg
;
606 memset(&ptys_reg
, 0, sizeof(ptys_reg
));
607 ptys_reg
.local_port
= priv
->port
;
608 ptys_reg
.proto_mask
= MLX4_PTYS_EN
;
609 ret
= mlx4_ACCESS_PTYS_REG(priv
->mdev
->dev
,
610 MLX4_ACCESS_REG_QUERY
, &ptys_reg
);
612 en_warn(priv
, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
616 en_dbg(DRV
, priv
, "ptys_reg.proto_mask %x\n",
617 ptys_reg
.proto_mask
);
618 en_dbg(DRV
, priv
, "ptys_reg.eth_proto_cap %x\n",
619 be32_to_cpu(ptys_reg
.eth_proto_cap
));
620 en_dbg(DRV
, priv
, "ptys_reg.eth_proto_admin %x\n",
621 be32_to_cpu(ptys_reg
.eth_proto_admin
));
622 en_dbg(DRV
, priv
, "ptys_reg.eth_proto_oper %x\n",
623 be32_to_cpu(ptys_reg
.eth_proto_oper
));
624 en_dbg(DRV
, priv
, "ptys_reg.eth_proto_lp_adv %x\n",
625 be32_to_cpu(ptys_reg
.eth_proto_lp_adv
));
628 cmd
->advertising
= 0;
630 cmd
->supported
|= ptys_get_supported_port(&ptys_reg
);
632 eth_proto
= be32_to_cpu(ptys_reg
.eth_proto_cap
);
633 cmd
->supported
|= ptys2ethtool_link_modes(eth_proto
, SUPPORTED
);
635 eth_proto
= be32_to_cpu(ptys_reg
.eth_proto_admin
);
636 cmd
->advertising
|= ptys2ethtool_link_modes(eth_proto
, ADVERTISED
);
638 cmd
->supported
|= SUPPORTED_Pause
| SUPPORTED_Asym_Pause
;
639 cmd
->advertising
|= (priv
->prof
->tx_pause
) ? ADVERTISED_Pause
: 0;
641 cmd
->advertising
|= (priv
->prof
->tx_pause
^ priv
->prof
->rx_pause
) ?
642 ADVERTISED_Asym_Pause
: 0;
644 cmd
->port
= ptys_get_active_port(&ptys_reg
);
645 cmd
->transceiver
= (SUPPORTED_TP
& cmd
->supported
) ?
646 XCVR_EXTERNAL
: XCVR_INTERNAL
;
648 if (mlx4_en_autoneg_get(dev
)) {
649 cmd
->supported
|= SUPPORTED_Autoneg
;
650 cmd
->advertising
|= ADVERTISED_Autoneg
;
653 cmd
->autoneg
= (priv
->port_state
.flags
& MLX4_EN_PORT_ANC
) ?
654 AUTONEG_ENABLE
: AUTONEG_DISABLE
;
656 eth_proto
= be32_to_cpu(ptys_reg
.eth_proto_lp_adv
);
657 cmd
->lp_advertising
= ptys2ethtool_link_modes(eth_proto
, ADVERTISED
);
659 cmd
->lp_advertising
|= (priv
->port_state
.flags
& MLX4_EN_PORT_ANC
) ?
660 ADVERTISED_Autoneg
: 0;
662 cmd
->phy_address
= 0;
663 cmd
->mdio_support
= 0;
666 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
667 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
672 static void ethtool_get_default_settings(struct net_device
*dev
,
673 struct ethtool_cmd
*cmd
)
675 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
678 cmd
->autoneg
= AUTONEG_DISABLE
;
679 cmd
->supported
= SUPPORTED_10000baseT_Full
;
680 cmd
->advertising
= ADVERTISED_10000baseT_Full
;
681 trans_type
= priv
->port_state
.transceiver
;
683 if (trans_type
> 0 && trans_type
<= 0xC) {
684 cmd
->port
= PORT_FIBRE
;
685 cmd
->transceiver
= XCVR_EXTERNAL
;
686 cmd
->supported
|= SUPPORTED_FIBRE
;
687 cmd
->advertising
|= ADVERTISED_FIBRE
;
688 } else if (trans_type
== 0x80 || trans_type
== 0) {
690 cmd
->transceiver
= XCVR_INTERNAL
;
691 cmd
->supported
|= SUPPORTED_TP
;
692 cmd
->advertising
|= ADVERTISED_TP
;
695 cmd
->transceiver
= -1;
699 static int mlx4_en_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
701 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
704 if (mlx4_en_QUERY_PORT(priv
->mdev
, priv
->port
))
707 en_dbg(DRV
, priv
, "query port state.flags ANC(%x) ANE(%x)\n",
708 priv
->port_state
.flags
& MLX4_EN_PORT_ANC
,
709 priv
->port_state
.flags
& MLX4_EN_PORT_ANE
);
711 if (priv
->mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL
)
712 ret
= ethtool_get_ptys_settings(dev
, cmd
);
713 if (ret
) /* ETH PROT CRTL is not supported or PTYS CMD failed */
714 ethtool_get_default_settings(dev
, cmd
);
716 if (netif_carrier_ok(dev
)) {
717 ethtool_cmd_speed_set(cmd
, priv
->port_state
.link_speed
);
718 cmd
->duplex
= DUPLEX_FULL
;
720 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
721 cmd
->duplex
= DUPLEX_UNKNOWN
;
726 /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
727 static __be32
speed_set_ptys_admin(struct mlx4_en_priv
*priv
, u32 speed
,
730 __be32 proto_admin
= 0;
732 if (!speed
) { /* Speed = 0 ==> Reset Link modes */
733 proto_admin
= proto_cap
;
734 en_info(priv
, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
735 be32_to_cpu(proto_cap
));
737 u32 ptys_link_modes
= speed2ptys_link_modes(speed
);
739 proto_admin
= cpu_to_be32(ptys_link_modes
) & proto_cap
;
740 en_info(priv
, "Setting Speed to %d\n", speed
);
745 static int mlx4_en_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
747 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
748 struct mlx4_ptys_reg ptys_reg
;
752 u32 ptys_adv
= ethtool2ptys_link_modes(cmd
->advertising
, ADVERTISED
);
753 int speed
= ethtool_cmd_speed(cmd
);
755 en_dbg(DRV
, priv
, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
756 speed
, cmd
->advertising
, cmd
->autoneg
, cmd
->duplex
);
758 if (!(priv
->mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL
) ||
759 (cmd
->autoneg
== AUTONEG_ENABLE
) || (cmd
->duplex
== DUPLEX_HALF
))
762 memset(&ptys_reg
, 0, sizeof(ptys_reg
));
763 ptys_reg
.local_port
= priv
->port
;
764 ptys_reg
.proto_mask
= MLX4_PTYS_EN
;
765 ret
= mlx4_ACCESS_PTYS_REG(priv
->mdev
->dev
,
766 MLX4_ACCESS_REG_QUERY
, &ptys_reg
);
768 en_warn(priv
, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
773 proto_admin
= cpu_to_be32(ptys_adv
);
774 if (speed
>= 0 && speed
!= priv
->port_state
.link_speed
)
775 /* If speed was set then speed decides :-) */
776 proto_admin
= speed_set_ptys_admin(priv
, speed
,
777 ptys_reg
.eth_proto_cap
);
779 proto_admin
&= ptys_reg
.eth_proto_cap
;
781 if (proto_admin
== ptys_reg
.eth_proto_admin
)
782 return 0; /* Nothing to change */
785 en_warn(priv
, "Not supported link mode(s) requested, check supported link modes.\n");
786 return -EINVAL
; /* nothing to change due to bad input */
789 en_dbg(DRV
, priv
, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
790 be32_to_cpu(proto_admin
));
792 ptys_reg
.eth_proto_admin
= proto_admin
;
793 ret
= mlx4_ACCESS_PTYS_REG(priv
->mdev
->dev
, MLX4_ACCESS_REG_WRITE
,
796 en_warn(priv
, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
797 be32_to_cpu(ptys_reg
.eth_proto_admin
), ret
);
801 en_warn(priv
, "Port link mode changed, restarting port...\n");
802 mutex_lock(&priv
->mdev
->state_lock
);
804 mlx4_en_stop_port(dev
, 1);
805 if (mlx4_en_start_port(dev
))
806 en_err(priv
, "Failed restarting port %d\n", priv
->port
);
808 mutex_unlock(&priv
->mdev
->state_lock
);
812 static int mlx4_en_get_coalesce(struct net_device
*dev
,
813 struct ethtool_coalesce
*coal
)
815 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
817 coal
->tx_coalesce_usecs
= priv
->tx_usecs
;
818 coal
->tx_max_coalesced_frames
= priv
->tx_frames
;
819 coal
->tx_max_coalesced_frames_irq
= priv
->tx_work_limit
;
821 coal
->rx_coalesce_usecs
= priv
->rx_usecs
;
822 coal
->rx_max_coalesced_frames
= priv
->rx_frames
;
824 coal
->pkt_rate_low
= priv
->pkt_rate_low
;
825 coal
->rx_coalesce_usecs_low
= priv
->rx_usecs_low
;
826 coal
->pkt_rate_high
= priv
->pkt_rate_high
;
827 coal
->rx_coalesce_usecs_high
= priv
->rx_usecs_high
;
828 coal
->rate_sample_interval
= priv
->sample_interval
;
829 coal
->use_adaptive_rx_coalesce
= priv
->adaptive_rx_coal
;
834 static int mlx4_en_set_coalesce(struct net_device
*dev
,
835 struct ethtool_coalesce
*coal
)
837 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
839 if (!coal
->tx_max_coalesced_frames_irq
)
842 priv
->rx_frames
= (coal
->rx_max_coalesced_frames
==
844 MLX4_EN_RX_COAL_TARGET
:
845 coal
->rx_max_coalesced_frames
;
846 priv
->rx_usecs
= (coal
->rx_coalesce_usecs
==
848 MLX4_EN_RX_COAL_TIME
:
849 coal
->rx_coalesce_usecs
;
851 /* Setting TX coalescing parameters */
852 if (coal
->tx_coalesce_usecs
!= priv
->tx_usecs
||
853 coal
->tx_max_coalesced_frames
!= priv
->tx_frames
) {
854 priv
->tx_usecs
= coal
->tx_coalesce_usecs
;
855 priv
->tx_frames
= coal
->tx_max_coalesced_frames
;
858 /* Set adaptive coalescing params */
859 priv
->pkt_rate_low
= coal
->pkt_rate_low
;
860 priv
->rx_usecs_low
= coal
->rx_coalesce_usecs_low
;
861 priv
->pkt_rate_high
= coal
->pkt_rate_high
;
862 priv
->rx_usecs_high
= coal
->rx_coalesce_usecs_high
;
863 priv
->sample_interval
= coal
->rate_sample_interval
;
864 priv
->adaptive_rx_coal
= coal
->use_adaptive_rx_coalesce
;
865 priv
->tx_work_limit
= coal
->tx_max_coalesced_frames_irq
;
867 return mlx4_en_moderation_update(priv
);
870 static int mlx4_en_set_pauseparam(struct net_device
*dev
,
871 struct ethtool_pauseparam
*pause
)
873 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
874 struct mlx4_en_dev
*mdev
= priv
->mdev
;
880 priv
->prof
->tx_pause
= pause
->tx_pause
!= 0;
881 priv
->prof
->rx_pause
= pause
->rx_pause
!= 0;
882 err
= mlx4_SET_PORT_general(mdev
->dev
, priv
->port
,
883 priv
->rx_skb_size
+ ETH_FCS_LEN
,
884 priv
->prof
->tx_pause
,
886 priv
->prof
->rx_pause
,
889 en_err(priv
, "Failed setting pause params\n");
894 static void mlx4_en_get_pauseparam(struct net_device
*dev
,
895 struct ethtool_pauseparam
*pause
)
897 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
899 pause
->tx_pause
= priv
->prof
->tx_pause
;
900 pause
->rx_pause
= priv
->prof
->rx_pause
;
903 static int mlx4_en_set_ringparam(struct net_device
*dev
,
904 struct ethtool_ringparam
*param
)
906 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
907 struct mlx4_en_dev
*mdev
= priv
->mdev
;
908 u32 rx_size
, tx_size
;
912 if (param
->rx_jumbo_pending
|| param
->rx_mini_pending
)
915 rx_size
= roundup_pow_of_two(param
->rx_pending
);
916 rx_size
= max_t(u32
, rx_size
, MLX4_EN_MIN_RX_SIZE
);
917 rx_size
= min_t(u32
, rx_size
, MLX4_EN_MAX_RX_SIZE
);
918 tx_size
= roundup_pow_of_two(param
->tx_pending
);
919 tx_size
= max_t(u32
, tx_size
, MLX4_EN_MIN_TX_SIZE
);
920 tx_size
= min_t(u32
, tx_size
, MLX4_EN_MAX_TX_SIZE
);
922 if (rx_size
== (priv
->port_up
? priv
->rx_ring
[0]->actual_size
:
923 priv
->rx_ring
[0]->size
) &&
924 tx_size
== priv
->tx_ring
[0]->size
)
927 mutex_lock(&mdev
->state_lock
);
930 mlx4_en_stop_port(dev
, 1);
933 mlx4_en_free_resources(priv
);
935 priv
->prof
->tx_ring_size
= tx_size
;
936 priv
->prof
->rx_ring_size
= rx_size
;
938 err
= mlx4_en_alloc_resources(priv
);
940 en_err(priv
, "Failed reallocating port resources\n");
944 err
= mlx4_en_start_port(dev
);
946 en_err(priv
, "Failed starting port\n");
949 err
= mlx4_en_moderation_update(priv
);
952 mutex_unlock(&mdev
->state_lock
);
956 static void mlx4_en_get_ringparam(struct net_device
*dev
,
957 struct ethtool_ringparam
*param
)
959 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
961 memset(param
, 0, sizeof(*param
));
962 param
->rx_max_pending
= MLX4_EN_MAX_RX_SIZE
;
963 param
->tx_max_pending
= MLX4_EN_MAX_TX_SIZE
;
964 param
->rx_pending
= priv
->port_up
?
965 priv
->rx_ring
[0]->actual_size
: priv
->rx_ring
[0]->size
;
966 param
->tx_pending
= priv
->tx_ring
[0]->size
;
969 static u32
mlx4_en_get_rxfh_indir_size(struct net_device
*dev
)
971 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
973 return priv
->rx_ring_num
;
976 static int mlx4_en_get_rxfh(struct net_device
*dev
, u32
*ring_index
, u8
*key
)
978 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
979 struct mlx4_en_rss_map
*rss_map
= &priv
->rss_map
;
981 size_t n
= priv
->rx_ring_num
;
984 rss_rings
= priv
->prof
->rss_rings
?: priv
->rx_ring_num
;
987 ring_index
[n
] = rss_map
->qps
[n
% rss_rings
].qpn
-
994 static int mlx4_en_set_rxfh(struct net_device
*dev
, const u32
*ring_index
,
997 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
998 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1004 /* Calculate RSS table size and make sure flows are spread evenly
1007 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1008 if (i
> 0 && !ring_index
[i
] && !rss_rings
)
1011 if (ring_index
[i
] != (i
% (rss_rings
?: priv
->rx_ring_num
)))
1016 rss_rings
= priv
->rx_ring_num
;
1018 /* RSS table size must be an order of 2 */
1019 if (!is_power_of_2(rss_rings
))
1022 mutex_lock(&mdev
->state_lock
);
1023 if (priv
->port_up
) {
1025 mlx4_en_stop_port(dev
, 1);
1028 priv
->prof
->rss_rings
= rss_rings
;
1031 err
= mlx4_en_start_port(dev
);
1033 en_err(priv
, "Failed starting port\n");
1036 mutex_unlock(&mdev
->state_lock
);
1040 #define all_zeros_or_all_ones(field) \
1041 ((field) == 0 || (field) == (__force typeof(field))-1)
1043 static int mlx4_en_validate_flow(struct net_device
*dev
,
1044 struct ethtool_rxnfc
*cmd
)
1046 struct ethtool_usrip4_spec
*l3_mask
;
1047 struct ethtool_tcpip4_spec
*l4_mask
;
1048 struct ethhdr
*eth_mask
;
1050 if (cmd
->fs
.location
>= MAX_NUM_OF_FS_RULES
)
1053 if (cmd
->fs
.flow_type
& FLOW_MAC_EXT
) {
1054 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1055 if (!is_broadcast_ether_addr(cmd
->fs
.m_ext
.h_dest
))
1059 switch (cmd
->fs
.flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
)) {
1062 if (cmd
->fs
.m_u
.tcp_ip4_spec
.tos
)
1064 l4_mask
= &cmd
->fs
.m_u
.tcp_ip4_spec
;
1065 /* don't allow mask which isn't all 0 or 1 */
1066 if (!all_zeros_or_all_ones(l4_mask
->ip4src
) ||
1067 !all_zeros_or_all_ones(l4_mask
->ip4dst
) ||
1068 !all_zeros_or_all_ones(l4_mask
->psrc
) ||
1069 !all_zeros_or_all_ones(l4_mask
->pdst
))
1073 l3_mask
= &cmd
->fs
.m_u
.usr_ip4_spec
;
1074 if (l3_mask
->l4_4_bytes
|| l3_mask
->tos
|| l3_mask
->proto
||
1075 cmd
->fs
.h_u
.usr_ip4_spec
.ip_ver
!= ETH_RX_NFC_IP4
||
1076 (!l3_mask
->ip4src
&& !l3_mask
->ip4dst
) ||
1077 !all_zeros_or_all_ones(l3_mask
->ip4src
) ||
1078 !all_zeros_or_all_ones(l3_mask
->ip4dst
))
1082 eth_mask
= &cmd
->fs
.m_u
.ether_spec
;
1083 /* source mac mask must not be set */
1084 if (!is_zero_ether_addr(eth_mask
->h_source
))
1087 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
1088 if (!is_broadcast_ether_addr(eth_mask
->h_dest
))
1091 if (!all_zeros_or_all_ones(eth_mask
->h_proto
))
1098 if ((cmd
->fs
.flow_type
& FLOW_EXT
)) {
1099 if (cmd
->fs
.m_ext
.vlan_etype
||
1100 !((cmd
->fs
.m_ext
.vlan_tci
& cpu_to_be16(VLAN_VID_MASK
)) ==
1102 (cmd
->fs
.m_ext
.vlan_tci
& cpu_to_be16(VLAN_VID_MASK
)) ==
1103 cpu_to_be16(VLAN_VID_MASK
)))
1106 if (cmd
->fs
.m_ext
.vlan_tci
) {
1107 if (be16_to_cpu(cmd
->fs
.h_ext
.vlan_tci
) >= VLAN_N_VID
)
1116 static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc
*cmd
,
1117 struct list_head
*rule_list_h
,
1118 struct mlx4_spec_list
*spec_l2
,
1122 __be64 mac_msk
= cpu_to_be64(MLX4_MAC_MASK
<< 16);
1124 spec_l2
->id
= MLX4_NET_TRANS_RULE_ID_ETH
;
1125 memcpy(spec_l2
->eth
.dst_mac_msk
, &mac_msk
, ETH_ALEN
);
1126 memcpy(spec_l2
->eth
.dst_mac
, mac
, ETH_ALEN
);
1128 if ((cmd
->fs
.flow_type
& FLOW_EXT
) &&
1129 (cmd
->fs
.m_ext
.vlan_tci
& cpu_to_be16(VLAN_VID_MASK
))) {
1130 spec_l2
->eth
.vlan_id
= cmd
->fs
.h_ext
.vlan_tci
;
1131 spec_l2
->eth
.vlan_id_msk
= cpu_to_be16(VLAN_VID_MASK
);
1134 list_add_tail(&spec_l2
->list
, rule_list_h
);
1139 static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv
*priv
,
1140 struct ethtool_rxnfc
*cmd
,
1141 struct list_head
*rule_list_h
,
1142 struct mlx4_spec_list
*spec_l2
,
1146 unsigned char mac
[ETH_ALEN
];
1148 if (!ipv4_is_multicast(ipv4_dst
)) {
1149 if (cmd
->fs
.flow_type
& FLOW_MAC_EXT
)
1150 memcpy(&mac
, cmd
->fs
.h_ext
.h_dest
, ETH_ALEN
);
1152 memcpy(&mac
, priv
->dev
->dev_addr
, ETH_ALEN
);
1154 ip_eth_mc_map(ipv4_dst
, mac
);
1157 return mlx4_en_ethtool_add_mac_rule(cmd
, rule_list_h
, spec_l2
, &mac
[0]);
1163 static int add_ip_rule(struct mlx4_en_priv
*priv
,
1164 struct ethtool_rxnfc
*cmd
,
1165 struct list_head
*list_h
)
1168 struct mlx4_spec_list
*spec_l2
= NULL
;
1169 struct mlx4_spec_list
*spec_l3
= NULL
;
1170 struct ethtool_usrip4_spec
*l3_mask
= &cmd
->fs
.m_u
.usr_ip4_spec
;
1172 spec_l3
= kzalloc(sizeof(*spec_l3
), GFP_KERNEL
);
1173 spec_l2
= kzalloc(sizeof(*spec_l2
), GFP_KERNEL
);
1174 if (!spec_l2
|| !spec_l3
) {
1179 err
= mlx4_en_ethtool_add_mac_rule_by_ipv4(priv
, cmd
, list_h
, spec_l2
,
1181 usr_ip4_spec
.ip4dst
);
1184 spec_l3
->id
= MLX4_NET_TRANS_RULE_ID_IPV4
;
1185 spec_l3
->ipv4
.src_ip
= cmd
->fs
.h_u
.usr_ip4_spec
.ip4src
;
1186 if (l3_mask
->ip4src
)
1187 spec_l3
->ipv4
.src_ip_msk
= EN_ETHTOOL_WORD_MASK
;
1188 spec_l3
->ipv4
.dst_ip
= cmd
->fs
.h_u
.usr_ip4_spec
.ip4dst
;
1189 if (l3_mask
->ip4dst
)
1190 spec_l3
->ipv4
.dst_ip_msk
= EN_ETHTOOL_WORD_MASK
;
1191 list_add_tail(&spec_l3
->list
, list_h
);
1201 static int add_tcp_udp_rule(struct mlx4_en_priv
*priv
,
1202 struct ethtool_rxnfc
*cmd
,
1203 struct list_head
*list_h
, int proto
)
1206 struct mlx4_spec_list
*spec_l2
= NULL
;
1207 struct mlx4_spec_list
*spec_l3
= NULL
;
1208 struct mlx4_spec_list
*spec_l4
= NULL
;
1209 struct ethtool_tcpip4_spec
*l4_mask
= &cmd
->fs
.m_u
.tcp_ip4_spec
;
1211 spec_l2
= kzalloc(sizeof(*spec_l2
), GFP_KERNEL
);
1212 spec_l3
= kzalloc(sizeof(*spec_l3
), GFP_KERNEL
);
1213 spec_l4
= kzalloc(sizeof(*spec_l4
), GFP_KERNEL
);
1214 if (!spec_l2
|| !spec_l3
|| !spec_l4
) {
1219 spec_l3
->id
= MLX4_NET_TRANS_RULE_ID_IPV4
;
1221 if (proto
== TCP_V4_FLOW
) {
1222 err
= mlx4_en_ethtool_add_mac_rule_by_ipv4(priv
, cmd
, list_h
,
1225 tcp_ip4_spec
.ip4dst
);
1228 spec_l4
->id
= MLX4_NET_TRANS_RULE_ID_TCP
;
1229 spec_l3
->ipv4
.src_ip
= cmd
->fs
.h_u
.tcp_ip4_spec
.ip4src
;
1230 spec_l3
->ipv4
.dst_ip
= cmd
->fs
.h_u
.tcp_ip4_spec
.ip4dst
;
1231 spec_l4
->tcp_udp
.src_port
= cmd
->fs
.h_u
.tcp_ip4_spec
.psrc
;
1232 spec_l4
->tcp_udp
.dst_port
= cmd
->fs
.h_u
.tcp_ip4_spec
.pdst
;
1234 err
= mlx4_en_ethtool_add_mac_rule_by_ipv4(priv
, cmd
, list_h
,
1237 udp_ip4_spec
.ip4dst
);
1240 spec_l4
->id
= MLX4_NET_TRANS_RULE_ID_UDP
;
1241 spec_l3
->ipv4
.src_ip
= cmd
->fs
.h_u
.udp_ip4_spec
.ip4src
;
1242 spec_l3
->ipv4
.dst_ip
= cmd
->fs
.h_u
.udp_ip4_spec
.ip4dst
;
1243 spec_l4
->tcp_udp
.src_port
= cmd
->fs
.h_u
.udp_ip4_spec
.psrc
;
1244 spec_l4
->tcp_udp
.dst_port
= cmd
->fs
.h_u
.udp_ip4_spec
.pdst
;
1247 if (l4_mask
->ip4src
)
1248 spec_l3
->ipv4
.src_ip_msk
= EN_ETHTOOL_WORD_MASK
;
1249 if (l4_mask
->ip4dst
)
1250 spec_l3
->ipv4
.dst_ip_msk
= EN_ETHTOOL_WORD_MASK
;
1253 spec_l4
->tcp_udp
.src_port_msk
= EN_ETHTOOL_SHORT_MASK
;
1255 spec_l4
->tcp_udp
.dst_port_msk
= EN_ETHTOOL_SHORT_MASK
;
1257 list_add_tail(&spec_l3
->list
, list_h
);
1258 list_add_tail(&spec_l4
->list
, list_h
);
1269 static int mlx4_en_ethtool_to_net_trans_rule(struct net_device
*dev
,
1270 struct ethtool_rxnfc
*cmd
,
1271 struct list_head
*rule_list_h
)
1274 struct ethhdr
*eth_spec
;
1275 struct mlx4_spec_list
*spec_l2
;
1276 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1278 err
= mlx4_en_validate_flow(dev
, cmd
);
1282 switch (cmd
->fs
.flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
)) {
1284 spec_l2
= kzalloc(sizeof(*spec_l2
), GFP_KERNEL
);
1288 eth_spec
= &cmd
->fs
.h_u
.ether_spec
;
1289 mlx4_en_ethtool_add_mac_rule(cmd
, rule_list_h
, spec_l2
,
1290 ð_spec
->h_dest
[0]);
1291 spec_l2
->eth
.ether_type
= eth_spec
->h_proto
;
1292 if (eth_spec
->h_proto
)
1293 spec_l2
->eth
.ether_type_enable
= 1;
1296 err
= add_ip_rule(priv
, cmd
, rule_list_h
);
1299 err
= add_tcp_udp_rule(priv
, cmd
, rule_list_h
, TCP_V4_FLOW
);
1302 err
= add_tcp_udp_rule(priv
, cmd
, rule_list_h
, UDP_V4_FLOW
);
1309 static int mlx4_en_flow_replace(struct net_device
*dev
,
1310 struct ethtool_rxnfc
*cmd
)
1313 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1314 struct ethtool_flow_id
*loc_rule
;
1315 struct mlx4_spec_list
*spec
, *tmp_spec
;
1319 struct mlx4_net_trans_rule rule
= {
1320 .queue_mode
= MLX4_NET_TRANS_Q_FIFO
,
1322 .allow_loopback
= 1,
1323 .promisc_mode
= MLX4_FS_REGULAR
,
1326 rule
.port
= priv
->port
;
1327 rule
.priority
= MLX4_DOMAIN_ETHTOOL
| cmd
->fs
.location
;
1328 INIT_LIST_HEAD(&rule
.list
);
1330 /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
1331 if (cmd
->fs
.ring_cookie
== RX_CLS_FLOW_DISC
)
1332 qpn
= priv
->drop_qp
.qpn
;
1333 else if (cmd
->fs
.ring_cookie
& EN_ETHTOOL_QP_ATTACH
) {
1334 qpn
= cmd
->fs
.ring_cookie
& (EN_ETHTOOL_QP_ATTACH
- 1);
1336 if (cmd
->fs
.ring_cookie
>= priv
->rx_ring_num
) {
1337 en_warn(priv
, "rxnfc: RX ring (%llu) doesn't exist\n",
1338 cmd
->fs
.ring_cookie
);
1341 qpn
= priv
->rss_map
.qps
[cmd
->fs
.ring_cookie
].qpn
;
1343 en_warn(priv
, "rxnfc: RX ring (%llu) is inactive\n",
1344 cmd
->fs
.ring_cookie
);
1349 err
= mlx4_en_ethtool_to_net_trans_rule(dev
, cmd
, &rule
.list
);
1353 loc_rule
= &priv
->ethtool_rules
[cmd
->fs
.location
];
1355 err
= mlx4_flow_detach(priv
->mdev
->dev
, loc_rule
->id
);
1357 en_err(priv
, "Fail to detach network rule at location %d. registration id = %llx\n",
1358 cmd
->fs
.location
, loc_rule
->id
);
1362 memset(&loc_rule
->flow_spec
, 0,
1363 sizeof(struct ethtool_rx_flow_spec
));
1364 list_del(&loc_rule
->list
);
1366 err
= mlx4_flow_attach(priv
->mdev
->dev
, &rule
, ®_id
);
1368 en_err(priv
, "Fail to attach network rule at location %d\n",
1372 loc_rule
->id
= reg_id
;
1373 memcpy(&loc_rule
->flow_spec
, &cmd
->fs
,
1374 sizeof(struct ethtool_rx_flow_spec
));
1375 list_add_tail(&loc_rule
->list
, &priv
->ethtool_list
);
1378 list_for_each_entry_safe(spec
, tmp_spec
, &rule
.list
, list
) {
1379 list_del(&spec
->list
);
1385 static int mlx4_en_flow_detach(struct net_device
*dev
,
1386 struct ethtool_rxnfc
*cmd
)
1389 struct ethtool_flow_id
*rule
;
1390 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1392 if (cmd
->fs
.location
>= MAX_NUM_OF_FS_RULES
)
1395 rule
= &priv
->ethtool_rules
[cmd
->fs
.location
];
1401 err
= mlx4_flow_detach(priv
->mdev
->dev
, rule
->id
);
1403 en_err(priv
, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
1404 cmd
->fs
.location
, rule
->id
);
1408 memset(&rule
->flow_spec
, 0, sizeof(struct ethtool_rx_flow_spec
));
1409 list_del(&rule
->list
);
1415 static int mlx4_en_get_flow(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
1419 struct ethtool_flow_id
*rule
;
1420 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1422 if (loc
< 0 || loc
>= MAX_NUM_OF_FS_RULES
)
1425 rule
= &priv
->ethtool_rules
[loc
];
1427 memcpy(&cmd
->fs
, &rule
->flow_spec
,
1428 sizeof(struct ethtool_rx_flow_spec
));
1435 static int mlx4_en_get_num_flows(struct mlx4_en_priv
*priv
)
1439 for (i
= 0; i
< MAX_NUM_OF_FS_RULES
; i
++) {
1440 if (priv
->ethtool_rules
[i
].id
)
1447 static int mlx4_en_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
1450 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1451 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1453 int i
= 0, priority
= 0;
1455 if ((cmd
->cmd
== ETHTOOL_GRXCLSRLCNT
||
1456 cmd
->cmd
== ETHTOOL_GRXCLSRULE
||
1457 cmd
->cmd
== ETHTOOL_GRXCLSRLALL
) &&
1458 (mdev
->dev
->caps
.steering_mode
!=
1459 MLX4_STEERING_MODE_DEVICE_MANAGED
|| !priv
->port_up
))
1463 case ETHTOOL_GRXRINGS
:
1464 cmd
->data
= priv
->rx_ring_num
;
1466 case ETHTOOL_GRXCLSRLCNT
:
1467 cmd
->rule_cnt
= mlx4_en_get_num_flows(priv
);
1469 case ETHTOOL_GRXCLSRULE
:
1470 err
= mlx4_en_get_flow(dev
, cmd
, cmd
->fs
.location
);
1472 case ETHTOOL_GRXCLSRLALL
:
1473 while ((!err
|| err
== -ENOENT
) && priority
< cmd
->rule_cnt
) {
1474 err
= mlx4_en_get_flow(dev
, cmd
, i
);
1476 rule_locs
[priority
++] = i
;
1489 static int mlx4_en_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
1492 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1493 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1495 if (mdev
->dev
->caps
.steering_mode
!=
1496 MLX4_STEERING_MODE_DEVICE_MANAGED
|| !priv
->port_up
)
1500 case ETHTOOL_SRXCLSRLINS
:
1501 err
= mlx4_en_flow_replace(dev
, cmd
);
1503 case ETHTOOL_SRXCLSRLDEL
:
1504 err
= mlx4_en_flow_detach(dev
, cmd
);
1507 en_warn(priv
, "Unsupported ethtool command. (%d)\n", cmd
->cmd
);
1514 static void mlx4_en_get_channels(struct net_device
*dev
,
1515 struct ethtool_channels
*channel
)
1517 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1519 memset(channel
, 0, sizeof(*channel
));
1521 channel
->max_rx
= MAX_RX_RINGS
;
1522 channel
->max_tx
= MLX4_EN_MAX_TX_RING_P_UP
;
1524 channel
->rx_count
= priv
->rx_ring_num
;
1525 channel
->tx_count
= priv
->tx_ring_num
/ MLX4_EN_NUM_UP
;
1528 static int mlx4_en_set_channels(struct net_device
*dev
,
1529 struct ethtool_channels
*channel
)
1531 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1532 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1536 if (channel
->other_count
|| channel
->combined_count
||
1537 channel
->tx_count
> MLX4_EN_MAX_TX_RING_P_UP
||
1538 channel
->rx_count
> MAX_RX_RINGS
||
1539 !channel
->tx_count
|| !channel
->rx_count
)
1542 mutex_lock(&mdev
->state_lock
);
1543 if (priv
->port_up
) {
1545 mlx4_en_stop_port(dev
, 1);
1548 mlx4_en_free_resources(priv
);
1550 priv
->num_tx_rings_p_up
= channel
->tx_count
;
1551 priv
->tx_ring_num
= channel
->tx_count
* MLX4_EN_NUM_UP
;
1552 priv
->rx_ring_num
= channel
->rx_count
;
1554 err
= mlx4_en_alloc_resources(priv
);
1556 en_err(priv
, "Failed reallocating port resources\n");
1560 netif_set_real_num_tx_queues(dev
, priv
->tx_ring_num
);
1561 netif_set_real_num_rx_queues(dev
, priv
->rx_ring_num
);
1564 mlx4_en_setup_tc(dev
, MLX4_EN_NUM_UP
);
1566 en_warn(priv
, "Using %d TX rings\n", priv
->tx_ring_num
);
1567 en_warn(priv
, "Using %d RX rings\n", priv
->rx_ring_num
);
1570 err
= mlx4_en_start_port(dev
);
1572 en_err(priv
, "Failed starting port\n");
1575 err
= mlx4_en_moderation_update(priv
);
1578 mutex_unlock(&mdev
->state_lock
);
1582 static int mlx4_en_get_ts_info(struct net_device
*dev
,
1583 struct ethtool_ts_info
*info
)
1585 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1586 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1589 ret
= ethtool_op_get_ts_info(dev
, info
);
1593 if (mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_TS
) {
1594 info
->so_timestamping
|=
1595 SOF_TIMESTAMPING_TX_HARDWARE
|
1596 SOF_TIMESTAMPING_RX_HARDWARE
|
1597 SOF_TIMESTAMPING_RAW_HARDWARE
;
1600 (1 << HWTSTAMP_TX_OFF
) |
1601 (1 << HWTSTAMP_TX_ON
);
1604 (1 << HWTSTAMP_FILTER_NONE
) |
1605 (1 << HWTSTAMP_FILTER_ALL
);
1607 if (mdev
->ptp_clock
)
1608 info
->phc_index
= ptp_clock_index(mdev
->ptp_clock
);
1614 static int mlx4_en_set_priv_flags(struct net_device
*dev
, u32 flags
)
1616 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1617 bool bf_enabled_new
= !!(flags
& MLX4_EN_PRIV_FLAGS_BLUEFLAME
);
1618 bool bf_enabled_old
= !!(priv
->pflags
& MLX4_EN_PRIV_FLAGS_BLUEFLAME
);
1621 if (bf_enabled_new
== bf_enabled_old
)
1622 return 0; /* Nothing to do */
1624 if (bf_enabled_new
) {
1625 bool bf_supported
= true;
1627 for (i
= 0; i
< priv
->tx_ring_num
; i
++)
1628 bf_supported
&= priv
->tx_ring
[i
]->bf_alloced
;
1630 if (!bf_supported
) {
1631 en_err(priv
, "BlueFlame is not supported\n");
1635 priv
->pflags
|= MLX4_EN_PRIV_FLAGS_BLUEFLAME
;
1637 priv
->pflags
&= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME
;
1640 for (i
= 0; i
< priv
->tx_ring_num
; i
++)
1641 priv
->tx_ring
[i
]->bf_enabled
= bf_enabled_new
;
1643 en_info(priv
, "BlueFlame %s\n",
1644 bf_enabled_new
? "Enabled" : "Disabled");
1649 static u32
mlx4_en_get_priv_flags(struct net_device
*dev
)
1651 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1653 return priv
->pflags
;
1656 static int mlx4_en_get_tunable(struct net_device
*dev
,
1657 const struct ethtool_tunable
*tuna
,
1660 const struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1664 case ETHTOOL_TX_COPYBREAK
:
1665 *(u32
*)data
= priv
->prof
->inline_thold
;
1675 static int mlx4_en_set_tunable(struct net_device
*dev
,
1676 const struct ethtool_tunable
*tuna
,
1679 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1683 case ETHTOOL_TX_COPYBREAK
:
1685 if (val
< MIN_PKT_LEN
|| val
> MAX_INLINE
)
1688 priv
->prof
->inline_thold
= val
;
1698 static int mlx4_en_get_module_info(struct net_device
*dev
,
1699 struct ethtool_modinfo
*modinfo
)
1701 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1702 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1706 /* Read first 2 bytes to get Module & REV ID */
1707 ret
= mlx4_get_module_info(mdev
->dev
, priv
->port
,
1708 0/*offset*/, 2/*size*/, data
);
1712 switch (data
[0] /* identifier */) {
1713 case MLX4_MODULE_ID_QSFP
:
1714 modinfo
->type
= ETH_MODULE_SFF_8436
;
1715 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1717 case MLX4_MODULE_ID_QSFP_PLUS
:
1718 if (data
[1] >= 0x3) { /* revision id */
1719 modinfo
->type
= ETH_MODULE_SFF_8636
;
1720 modinfo
->eeprom_len
= ETH_MODULE_SFF_8636_LEN
;
1722 modinfo
->type
= ETH_MODULE_SFF_8436
;
1723 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1726 case MLX4_MODULE_ID_QSFP28
:
1727 modinfo
->type
= ETH_MODULE_SFF_8636
;
1728 modinfo
->eeprom_len
= ETH_MODULE_SFF_8636_LEN
;
1730 case MLX4_MODULE_ID_SFP
:
1731 modinfo
->type
= ETH_MODULE_SFF_8472
;
1732 modinfo
->eeprom_len
= ETH_MODULE_SFF_8472_LEN
;
1741 static int mlx4_en_get_module_eeprom(struct net_device
*dev
,
1742 struct ethtool_eeprom
*ee
,
1745 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1746 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1747 int offset
= ee
->offset
;
1753 memset(data
, 0, ee
->len
);
1755 while (i
< ee
->len
) {
1757 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
1758 i
, offset
, ee
->len
- i
);
1760 ret
= mlx4_get_module_info(mdev
->dev
, priv
->port
,
1761 offset
, ee
->len
- i
, data
+ i
);
1763 if (!ret
) /* Done reading */
1768 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
1769 i
, offset
, ee
->len
- i
, ret
);
1779 const struct ethtool_ops mlx4_en_ethtool_ops
= {
1780 .get_drvinfo
= mlx4_en_get_drvinfo
,
1781 .get_settings
= mlx4_en_get_settings
,
1782 .set_settings
= mlx4_en_set_settings
,
1783 .get_link
= ethtool_op_get_link
,
1784 .get_strings
= mlx4_en_get_strings
,
1785 .get_sset_count
= mlx4_en_get_sset_count
,
1786 .get_ethtool_stats
= mlx4_en_get_ethtool_stats
,
1787 .self_test
= mlx4_en_self_test
,
1788 .get_wol
= mlx4_en_get_wol
,
1789 .set_wol
= mlx4_en_set_wol
,
1790 .get_msglevel
= mlx4_en_get_msglevel
,
1791 .set_msglevel
= mlx4_en_set_msglevel
,
1792 .get_coalesce
= mlx4_en_get_coalesce
,
1793 .set_coalesce
= mlx4_en_set_coalesce
,
1794 .get_pauseparam
= mlx4_en_get_pauseparam
,
1795 .set_pauseparam
= mlx4_en_set_pauseparam
,
1796 .get_ringparam
= mlx4_en_get_ringparam
,
1797 .set_ringparam
= mlx4_en_set_ringparam
,
1798 .get_rxnfc
= mlx4_en_get_rxnfc
,
1799 .set_rxnfc
= mlx4_en_set_rxnfc
,
1800 .get_rxfh_indir_size
= mlx4_en_get_rxfh_indir_size
,
1801 .get_rxfh
= mlx4_en_get_rxfh
,
1802 .set_rxfh
= mlx4_en_set_rxfh
,
1803 .get_channels
= mlx4_en_get_channels
,
1804 .set_channels
= mlx4_en_set_channels
,
1805 .get_ts_info
= mlx4_en_get_ts_info
,
1806 .set_priv_flags
= mlx4_en_set_priv_flags
,
1807 .get_priv_flags
= mlx4_en_get_priv_flags
,
1808 .get_tunable
= mlx4_en_get_tunable
,
1809 .set_tunable
= mlx4_en_set_tunable
,
1810 .get_module_info
= mlx4_en_get_module_info
,
1811 .get_module_eeprom
= mlx4_en_get_module_eeprom