2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 static void mlx5e_get_drvinfo(struct net_device
*dev
,
36 struct ethtool_drvinfo
*drvinfo
)
38 struct mlx5e_priv
*priv
= netdev_priv(dev
);
39 struct mlx5_core_dev
*mdev
= priv
->mdev
;
41 strlcpy(drvinfo
->driver
, DRIVER_NAME
, sizeof(drvinfo
->driver
));
42 strlcpy(drvinfo
->version
, DRIVER_VERSION
" (" DRIVER_RELDATE
")",
43 sizeof(drvinfo
->version
));
44 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
46 fw_rev_maj(mdev
), fw_rev_min(mdev
), fw_rev_sub(mdev
));
47 strlcpy(drvinfo
->bus_info
, pci_name(mdev
->pdev
),
48 sizeof(drvinfo
->bus_info
));
51 struct ptys2ethtool_config
{
52 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported
);
53 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised
);
57 static struct ptys2ethtool_config ptys2ethtool_table
[MLX5E_LINK_MODES_NUMBER
];
/* Fill one ptys2ethtool_table[] slot: record the speed and set every
 * listed ETHTOOL_LINK_MODE_* bit in both the supported and advertised
 * bitmaps (the device advertises whatever it supports).
 */
#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)		\
	({								\
		struct ptys2ethtool_config *entry;			\
		const unsigned int mode_bits[] = { __VA_ARGS__ };	\
		u32 idx;						\
									\
		entry = &ptys2ethtool_table[reg_];			\
		entry->speed = speed_;					\
		bitmap_zero(entry->supported,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		bitmap_zero(entry->advertised,				\
			    __ETHTOOL_LINK_MODE_MASK_NBITS);		\
		for (idx = 0; idx < ARRAY_SIZE(mode_bits); ++idx) {	\
			__set_bit(mode_bits[idx], entry->supported);	\
			__set_bit(mode_bits[idx], entry->advertised);	\
		}							\
	})
76 void mlx5e_build_ptys2ethtool_map(void)
78 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII
, SPEED_1000
,
79 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
);
80 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX
, SPEED_1000
,
81 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
);
82 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4
, SPEED_10000
,
83 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
);
84 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4
, SPEED_10000
,
85 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
);
86 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR
, SPEED_10000
,
87 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
88 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2
, SPEED_20000
,
89 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
);
90 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4
, SPEED_40000
,
91 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
);
92 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4
, SPEED_40000
,
93 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
);
94 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4
, SPEED_56000
,
95 ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
);
96 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR
, SPEED_10000
,
97 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
98 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR
, SPEED_10000
,
99 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
100 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER
, SPEED_10000
,
101 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
102 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4
, SPEED_40000
,
103 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
);
104 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4
, SPEED_40000
,
105 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
);
106 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2
, SPEED_50000
,
107 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
);
108 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4
, SPEED_100000
,
109 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
);
110 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4
, SPEED_100000
,
111 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
);
112 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4
, SPEED_100000
,
113 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
);
114 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4
, SPEED_100000
,
115 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
);
116 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T
, SPEED_10000
,
117 ETHTOOL_LINK_MODE_10000baseT_Full_BIT
);
118 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR
, SPEED_25000
,
119 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
);
120 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR
, SPEED_25000
,
121 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
);
122 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR
, SPEED_25000
,
123 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
);
124 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2
, SPEED_50000
,
125 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
);
126 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2
, SPEED_50000
,
127 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
);
130 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv
*priv
)
132 struct mlx5_core_dev
*mdev
= priv
->mdev
;
137 err
= mlx5_query_port_pfc(mdev
, &pfc_en_tx
, &pfc_en_rx
);
139 return err
? 0 : pfc_en_tx
| pfc_en_rx
;
142 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv
*priv
)
144 struct mlx5_core_dev
*mdev
= priv
->mdev
;
149 err
= mlx5_query_port_pause(mdev
, &rx_pause
, &tx_pause
);
151 return err
? false : rx_pause
| tx_pause
;
/* Number of ethtool stats contributed by each source.  Q counters exist
 * only when a queue counter was allocated (priv->q_counter != 0); the
 * per-ring RQ/SQ counters are exposed only while the netdev is open.
 */
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
#define MLX5E_NUM_RQ_STATS(priv) \
	(NUM_RQ_STATS * priv->params.num_channels * \
	 test_bit(MLX5E_STATE_OPENED, &priv->state))
#define MLX5E_NUM_SQ_STATS(priv) \
	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
	 test_bit(MLX5E_STATE_OPENED, &priv->state))
/* One PFC counter set per PFC-enabled priority, plus one "global" set
 * when plain pause is on (mirrors mlx5e_fill_stats_strings()).
 */
#define MLX5E_NUM_PFC_COUNTERS(priv) \
	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
	 NUM_PPORT_PER_PRIO_PFC_COUNTERS)
165 static int mlx5e_get_sset_count(struct net_device
*dev
, int sset
)
167 struct mlx5e_priv
*priv
= netdev_priv(dev
);
171 return NUM_SW_COUNTERS
+
172 MLX5E_NUM_Q_CNTRS(priv
) +
173 NUM_VPORT_COUNTERS
+ NUM_PPORT_COUNTERS(priv
) +
174 NUM_PCIE_COUNTERS(priv
) +
175 MLX5E_NUM_RQ_STATS(priv
) +
176 MLX5E_NUM_SQ_STATS(priv
) +
177 MLX5E_NUM_PFC_COUNTERS(priv
) +
178 ARRAY_SIZE(mlx5e_pme_status_desc
) +
179 ARRAY_SIZE(mlx5e_pme_error_desc
);
181 case ETH_SS_PRIV_FLAGS
:
182 return ARRAY_SIZE(mlx5e_priv_flags
);
184 return mlx5e_self_test_num(priv
);
191 static void mlx5e_fill_stats_strings(struct mlx5e_priv
*priv
, uint8_t *data
)
193 int i
, j
, tc
, prio
, idx
= 0;
194 unsigned long pfc_combined
;
197 for (i
= 0; i
< NUM_SW_COUNTERS
; i
++)
198 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, sw_stats_desc
[i
].format
);
201 for (i
= 0; i
< MLX5E_NUM_Q_CNTRS(priv
); i
++)
202 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, q_stats_desc
[i
].format
);
205 for (i
= 0; i
< NUM_VPORT_COUNTERS
; i
++)
206 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
207 vport_stats_desc
[i
].format
);
210 for (i
= 0; i
< NUM_PPORT_802_3_COUNTERS
; i
++)
211 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
212 pport_802_3_stats_desc
[i
].format
);
214 for (i
= 0; i
< NUM_PPORT_2863_COUNTERS
; i
++)
215 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
216 pport_2863_stats_desc
[i
].format
);
218 for (i
= 0; i
< NUM_PPORT_2819_COUNTERS
; i
++)
219 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
220 pport_2819_stats_desc
[i
].format
);
222 for (i
= 0; i
< NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv
); i
++)
223 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
224 pport_phy_statistical_stats_desc
[i
].format
);
226 for (i
= 0; i
< NUM_PCIE_PERF_COUNTERS(priv
); i
++)
227 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
228 pcie_perf_stats_desc
[i
].format
);
230 for (prio
= 0; prio
< NUM_PPORT_PRIO
; prio
++) {
231 for (i
= 0; i
< NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS
; i
++)
232 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
233 pport_per_prio_traffic_stats_desc
[i
].format
, prio
);
236 pfc_combined
= mlx5e_query_pfc_combined(priv
);
237 for_each_set_bit(prio
, &pfc_combined
, NUM_PPORT_PRIO
) {
238 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
239 char pfc_string
[ETH_GSTRING_LEN
];
241 snprintf(pfc_string
, sizeof(pfc_string
), "prio%d", prio
);
242 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
243 pport_per_prio_pfc_stats_desc
[i
].format
, pfc_string
);
247 if (mlx5e_query_global_pause_combined(priv
)) {
248 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
249 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
250 pport_per_prio_pfc_stats_desc
[i
].format
, "global");
254 /* port module event counters */
255 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_status_desc
); i
++)
256 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, mlx5e_pme_status_desc
[i
].format
);
258 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_error_desc
); i
++)
259 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, mlx5e_pme_error_desc
[i
].format
);
261 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
264 /* per channel counters */
265 for (i
= 0; i
< priv
->params
.num_channels
; i
++)
266 for (j
= 0; j
< NUM_RQ_STATS
; j
++)
267 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
268 rq_stats_desc
[j
].format
, i
);
270 for (tc
= 0; tc
< priv
->params
.num_tc
; tc
++)
271 for (i
= 0; i
< priv
->params
.num_channels
; i
++)
272 for (j
= 0; j
< NUM_SQ_STATS
; j
++)
273 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
274 sq_stats_desc
[j
].format
,
275 priv
->channeltc_to_txq_map
[i
][tc
]);
278 static void mlx5e_get_strings(struct net_device
*dev
,
279 uint32_t stringset
, uint8_t *data
)
281 struct mlx5e_priv
*priv
= netdev_priv(dev
);
285 case ETH_SS_PRIV_FLAGS
:
286 for (i
= 0; i
< ARRAY_SIZE(mlx5e_priv_flags
); i
++)
287 strcpy(data
+ i
* ETH_GSTRING_LEN
, mlx5e_priv_flags
[i
]);
291 for (i
= 0; i
< mlx5e_self_test_num(priv
); i
++)
292 strcpy(data
+ i
* ETH_GSTRING_LEN
,
293 mlx5e_self_tests
[i
]);
297 mlx5e_fill_stats_strings(priv
, data
);
302 static void mlx5e_get_ethtool_stats(struct net_device
*dev
,
303 struct ethtool_stats
*stats
, u64
*data
)
305 struct mlx5e_priv
*priv
= netdev_priv(dev
);
306 struct mlx5_priv
*mlx5_priv
;
307 int i
, j
, tc
, prio
, idx
= 0;
308 unsigned long pfc_combined
;
313 mutex_lock(&priv
->state_lock
);
314 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
315 mlx5e_update_stats(priv
);
316 mutex_unlock(&priv
->state_lock
);
318 for (i
= 0; i
< NUM_SW_COUNTERS
; i
++)
319 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.sw
,
322 for (i
= 0; i
< MLX5E_NUM_Q_CNTRS(priv
); i
++)
323 data
[idx
++] = MLX5E_READ_CTR32_CPU(&priv
->stats
.qcnt
,
326 for (i
= 0; i
< NUM_VPORT_COUNTERS
; i
++)
327 data
[idx
++] = MLX5E_READ_CTR64_BE(priv
->stats
.vport
.query_vport_out
,
328 vport_stats_desc
, i
);
330 for (i
= 0; i
< NUM_PPORT_802_3_COUNTERS
; i
++)
331 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.IEEE_802_3_counters
,
332 pport_802_3_stats_desc
, i
);
334 for (i
= 0; i
< NUM_PPORT_2863_COUNTERS
; i
++)
335 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.RFC_2863_counters
,
336 pport_2863_stats_desc
, i
);
338 for (i
= 0; i
< NUM_PPORT_2819_COUNTERS
; i
++)
339 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.RFC_2819_counters
,
340 pport_2819_stats_desc
, i
);
342 for (i
= 0; i
< NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv
); i
++)
343 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.phy_statistical_counters
,
344 pport_phy_statistical_stats_desc
, i
);
346 for (i
= 0; i
< NUM_PCIE_PERF_COUNTERS(priv
); i
++)
347 data
[idx
++] = MLX5E_READ_CTR32_BE(&priv
->stats
.pcie
.pcie_perf_counters
,
348 pcie_perf_stats_desc
, i
);
350 for (prio
= 0; prio
< NUM_PPORT_PRIO
; prio
++) {
351 for (i
= 0; i
< NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS
; i
++)
352 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[prio
],
353 pport_per_prio_traffic_stats_desc
, i
);
356 pfc_combined
= mlx5e_query_pfc_combined(priv
);
357 for_each_set_bit(prio
, &pfc_combined
, NUM_PPORT_PRIO
) {
358 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
359 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[prio
],
360 pport_per_prio_pfc_stats_desc
, i
);
364 if (mlx5e_query_global_pause_combined(priv
)) {
365 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
366 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[0],
367 pport_per_prio_pfc_stats_desc
, i
);
371 /* port module event counters */
372 mlx5_priv
= &priv
->mdev
->priv
;
373 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_status_desc
); i
++)
374 data
[idx
++] = MLX5E_READ_CTR64_CPU(mlx5_priv
->pme_stats
.status_counters
,
375 mlx5e_pme_status_desc
, i
);
377 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_error_desc
); i
++)
378 data
[idx
++] = MLX5E_READ_CTR64_CPU(mlx5_priv
->pme_stats
.error_counters
,
379 mlx5e_pme_error_desc
, i
);
381 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
384 /* per channel counters */
385 for (i
= 0; i
< priv
->params
.num_channels
; i
++)
386 for (j
= 0; j
< NUM_RQ_STATS
; j
++)
388 MLX5E_READ_CTR64_CPU(&priv
->channel
[i
]->rq
.stats
,
391 for (tc
= 0; tc
< priv
->params
.num_tc
; tc
++)
392 for (i
= 0; i
< priv
->params
.num_channels
; i
++)
393 for (j
= 0; j
< NUM_SQ_STATS
; j
++)
394 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->channel
[i
]->sq
[tc
].stats
,
398 static u32
mlx5e_rx_wqes_to_packets(struct mlx5e_priv
*priv
, int rq_wq_type
,
406 if (rq_wq_type
!= MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
409 stride_size
= 1 << priv
->params
.mpwqe_log_stride_sz
;
410 num_strides
= 1 << priv
->params
.mpwqe_log_num_strides
;
411 wqe_size
= stride_size
* num_strides
;
413 packets_per_wqe
= wqe_size
/
414 ALIGN(ETH_DATA_LEN
, stride_size
);
415 return (1 << (order_base_2(num_wqe
* packets_per_wqe
) - 1));
418 static u32
mlx5e_packets_to_rx_wqes(struct mlx5e_priv
*priv
, int rq_wq_type
,
427 if (rq_wq_type
!= MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
430 stride_size
= 1 << priv
->params
.mpwqe_log_stride_sz
;
431 num_strides
= 1 << priv
->params
.mpwqe_log_num_strides
;
432 wqe_size
= stride_size
* num_strides
;
434 num_packets
= (1 << order_base_2(num_packets
));
436 packets_per_wqe
= wqe_size
/
437 ALIGN(ETH_DATA_LEN
, stride_size
);
438 num_wqes
= DIV_ROUND_UP(num_packets
, packets_per_wqe
);
439 return 1 << (order_base_2(num_wqes
));
442 static void mlx5e_get_ringparam(struct net_device
*dev
,
443 struct ethtool_ringparam
*param
)
445 struct mlx5e_priv
*priv
= netdev_priv(dev
);
446 int rq_wq_type
= priv
->params
.rq_wq_type
;
448 param
->rx_max_pending
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
449 1 << mlx5_max_log_rq_size(rq_wq_type
));
450 param
->tx_max_pending
= 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
;
451 param
->rx_pending
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
452 1 << priv
->params
.log_rq_size
);
453 param
->tx_pending
= 1 << priv
->params
.log_sq_size
;
456 static int mlx5e_set_ringparam(struct net_device
*dev
,
457 struct ethtool_ringparam
*param
)
459 struct mlx5e_priv
*priv
= netdev_priv(dev
);
461 int rq_wq_type
= priv
->params
.rq_wq_type
;
471 if (param
->rx_jumbo_pending
) {
472 netdev_info(dev
, "%s: rx_jumbo_pending not supported\n",
476 if (param
->rx_mini_pending
) {
477 netdev_info(dev
, "%s: rx_mini_pending not supported\n",
482 min_rq_size
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
483 1 << mlx5_min_log_rq_size(rq_wq_type
));
484 max_rq_size
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
485 1 << mlx5_max_log_rq_size(rq_wq_type
));
486 rx_pending_wqes
= mlx5e_packets_to_rx_wqes(priv
, rq_wq_type
,
489 if (param
->rx_pending
< min_rq_size
) {
490 netdev_info(dev
, "%s: rx_pending (%d) < min (%d)\n",
491 __func__
, param
->rx_pending
,
495 if (param
->rx_pending
> max_rq_size
) {
496 netdev_info(dev
, "%s: rx_pending (%d) > max (%d)\n",
497 __func__
, param
->rx_pending
,
502 num_mtts
= MLX5E_REQUIRED_MTTS(rx_pending_wqes
);
503 if (priv
->params
.rq_wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
&&
504 !MLX5E_VALID_NUM_MTTS(num_mtts
)) {
505 netdev_info(dev
, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
506 __func__
, param
->rx_pending
);
510 if (param
->tx_pending
< (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE
)) {
511 netdev_info(dev
, "%s: tx_pending (%d) < min (%d)\n",
512 __func__
, param
->tx_pending
,
513 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE
);
516 if (param
->tx_pending
> (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
)) {
517 netdev_info(dev
, "%s: tx_pending (%d) > max (%d)\n",
518 __func__
, param
->tx_pending
,
519 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
);
523 log_rq_size
= order_base_2(rx_pending_wqes
);
524 log_sq_size
= order_base_2(param
->tx_pending
);
525 min_rx_wqes
= mlx5_min_rx_wqes(rq_wq_type
, rx_pending_wqes
);
527 if (log_rq_size
== priv
->params
.log_rq_size
&&
528 log_sq_size
== priv
->params
.log_sq_size
&&
529 min_rx_wqes
== priv
->params
.min_rx_wqes
)
532 mutex_lock(&priv
->state_lock
);
534 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
536 mlx5e_close_locked(dev
);
538 priv
->params
.log_rq_size
= log_rq_size
;
539 priv
->params
.log_sq_size
= log_sq_size
;
540 priv
->params
.min_rx_wqes
= min_rx_wqes
;
543 err
= mlx5e_open_locked(dev
);
545 mutex_unlock(&priv
->state_lock
);
550 static void mlx5e_get_channels(struct net_device
*dev
,
551 struct ethtool_channels
*ch
)
553 struct mlx5e_priv
*priv
= netdev_priv(dev
);
555 ch
->max_combined
= priv
->profile
->max_nch(priv
->mdev
);
556 ch
->combined_count
= priv
->params
.num_channels
;
559 static int mlx5e_set_channels(struct net_device
*dev
,
560 struct ethtool_channels
*ch
)
562 struct mlx5e_priv
*priv
= netdev_priv(dev
);
563 unsigned int count
= ch
->combined_count
;
569 netdev_info(dev
, "%s: combined_count=0 not supported\n",
574 if (priv
->params
.num_channels
== count
)
577 mutex_lock(&priv
->state_lock
);
579 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
581 mlx5e_close_locked(dev
);
583 arfs_enabled
= dev
->features
& NETIF_F_NTUPLE
;
585 mlx5e_arfs_disable(priv
);
587 priv
->params
.num_channels
= count
;
588 mlx5e_build_default_indir_rqt(priv
->mdev
, priv
->params
.indirection_rqt
,
589 MLX5E_INDIR_RQT_SIZE
, count
);
592 err
= mlx5e_open_locked(dev
);
597 err
= mlx5e_arfs_enable(priv
);
599 netdev_err(dev
, "%s: mlx5e_arfs_enable failed: %d\n",
604 mutex_unlock(&priv
->state_lock
);
609 static int mlx5e_get_coalesce(struct net_device
*netdev
,
610 struct ethtool_coalesce
*coal
)
612 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
614 if (!MLX5_CAP_GEN(priv
->mdev
, cq_moderation
))
617 coal
->rx_coalesce_usecs
= priv
->params
.rx_cq_moderation
.usec
;
618 coal
->rx_max_coalesced_frames
= priv
->params
.rx_cq_moderation
.pkts
;
619 coal
->tx_coalesce_usecs
= priv
->params
.tx_cq_moderation
.usec
;
620 coal
->tx_max_coalesced_frames
= priv
->params
.tx_cq_moderation
.pkts
;
621 coal
->use_adaptive_rx_coalesce
= priv
->params
.rx_am_enabled
;
626 static int mlx5e_set_coalesce(struct net_device
*netdev
,
627 struct ethtool_coalesce
*coal
)
629 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
630 struct mlx5_core_dev
*mdev
= priv
->mdev
;
631 struct mlx5e_channel
*c
;
633 !!coal
->use_adaptive_rx_coalesce
!= priv
->params
.rx_am_enabled
;
639 if (!MLX5_CAP_GEN(mdev
, cq_moderation
))
642 mutex_lock(&priv
->state_lock
);
644 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
645 if (was_opened
&& restart
) {
646 mlx5e_close_locked(netdev
);
647 priv
->params
.rx_am_enabled
= !!coal
->use_adaptive_rx_coalesce
;
650 priv
->params
.tx_cq_moderation
.usec
= coal
->tx_coalesce_usecs
;
651 priv
->params
.tx_cq_moderation
.pkts
= coal
->tx_max_coalesced_frames
;
652 priv
->params
.rx_cq_moderation
.usec
= coal
->rx_coalesce_usecs
;
653 priv
->params
.rx_cq_moderation
.pkts
= coal
->rx_max_coalesced_frames
;
655 if (!was_opened
|| restart
)
658 for (i
= 0; i
< priv
->params
.num_channels
; ++i
) {
659 c
= priv
->channel
[i
];
661 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
662 mlx5_core_modify_cq_moderation(mdev
,
664 coal
->tx_coalesce_usecs
,
665 coal
->tx_max_coalesced_frames
);
668 mlx5_core_modify_cq_moderation(mdev
, &c
->rq
.cq
.mcq
,
669 coal
->rx_coalesce_usecs
,
670 coal
->rx_max_coalesced_frames
);
674 if (was_opened
&& restart
)
675 err
= mlx5e_open_locked(netdev
);
677 mutex_unlock(&priv
->state_lock
);
681 static void ptys2ethtool_supported_link(unsigned long *supported_modes
,
684 unsigned long proto_cap
= eth_proto_cap
;
687 for_each_set_bit(proto
, &proto_cap
, MLX5E_LINK_MODES_NUMBER
)
688 bitmap_or(supported_modes
, supported_modes
,
689 ptys2ethtool_table
[proto
].supported
,
690 __ETHTOOL_LINK_MODE_MASK_NBITS
);
693 static void ptys2ethtool_adver_link(unsigned long *advertising_modes
,
696 unsigned long proto_cap
= eth_proto_cap
;
699 for_each_set_bit(proto
, &proto_cap
, MLX5E_LINK_MODES_NUMBER
)
700 bitmap_or(advertising_modes
, advertising_modes
,
701 ptys2ethtool_table
[proto
].advertised
,
702 __ETHTOOL_LINK_MODE_MASK_NBITS
);
705 static void ptys2ethtool_supported_port(struct ethtool_link_ksettings
*link_ksettings
,
708 if (eth_proto_cap
& (MLX5E_PROT_MASK(MLX5E_10GBASE_CR
)
709 | MLX5E_PROT_MASK(MLX5E_10GBASE_SR
)
710 | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4
)
711 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4
)
712 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4
)
713 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII
))) {
714 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, FIBRE
);
717 if (eth_proto_cap
& (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4
)
718 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4
)
719 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR
)
720 | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4
)
721 | MLX5E_PROT_MASK(MLX5E_1000BASE_KX
))) {
722 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Backplane
);
726 int mlx5e_get_max_linkspeed(struct mlx5_core_dev
*mdev
, u32
*speed
)
733 err
= mlx5_query_port_proto_cap(mdev
, &proto_cap
, MLX5_PTYS_EN
);
737 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
)
738 if (proto_cap
& MLX5E_PROT_MASK(i
))
739 max_speed
= max(max_speed
, ptys2ethtool_table
[i
].speed
);
745 static void get_speed_duplex(struct net_device
*netdev
,
747 struct ethtool_link_ksettings
*link_ksettings
)
750 u32 speed
= SPEED_UNKNOWN
;
751 u8 duplex
= DUPLEX_UNKNOWN
;
753 if (!netif_carrier_ok(netdev
))
756 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
757 if (eth_proto_oper
& MLX5E_PROT_MASK(i
)) {
758 speed
= ptys2ethtool_table
[i
].speed
;
759 duplex
= DUPLEX_FULL
;
764 link_ksettings
->base
.speed
= speed
;
765 link_ksettings
->base
.duplex
= duplex
;
768 static void get_supported(u32 eth_proto_cap
,
769 struct ethtool_link_ksettings
*link_ksettings
)
771 unsigned long *supported
= link_ksettings
->link_modes
.supported
;
773 ptys2ethtool_supported_port(link_ksettings
, eth_proto_cap
);
774 ptys2ethtool_supported_link(supported
, eth_proto_cap
);
775 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Pause
);
776 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Asym_Pause
);
779 static void get_advertising(u32 eth_proto_cap
, u8 tx_pause
,
781 struct ethtool_link_ksettings
*link_ksettings
)
783 unsigned long *advertising
= link_ksettings
->link_modes
.advertising
;
785 ptys2ethtool_adver_link(advertising
, eth_proto_cap
);
787 ethtool_link_ksettings_add_link_mode(link_ksettings
, advertising
, Pause
);
788 if (tx_pause
^ rx_pause
)
789 ethtool_link_ksettings_add_link_mode(link_ksettings
, advertising
, Asym_Pause
);
792 static u8
get_connector_port(u32 eth_proto
)
794 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_10GBASE_SR
)
795 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4
)
796 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4
)
797 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII
))) {
801 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4
)
802 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR
)
803 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4
))) {
807 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4
)
808 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR
)
809 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4
)
810 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4
))) {
817 static void get_lp_advertising(u32 eth_proto_lp
,
818 struct ethtool_link_ksettings
*link_ksettings
)
820 unsigned long *lp_advertising
= link_ksettings
->link_modes
.lp_advertising
;
822 ptys2ethtool_adver_link(lp_advertising
, eth_proto_lp
);
825 static int mlx5e_get_link_ksettings(struct net_device
*netdev
,
826 struct ethtool_link_ksettings
*link_ksettings
)
828 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
829 struct mlx5_core_dev
*mdev
= priv
->mdev
;
830 u32 out
[MLX5_ST_SZ_DW(ptys_reg
)] = {0};
839 err
= mlx5_query_port_ptys(mdev
, out
, sizeof(out
), MLX5_PTYS_EN
, 1);
841 netdev_err(netdev
, "%s: query port ptys failed: %d\n",
846 eth_proto_cap
= MLX5_GET(ptys_reg
, out
, eth_proto_capability
);
847 eth_proto_admin
= MLX5_GET(ptys_reg
, out
, eth_proto_admin
);
848 eth_proto_oper
= MLX5_GET(ptys_reg
, out
, eth_proto_oper
);
849 eth_proto_lp
= MLX5_GET(ptys_reg
, out
, eth_proto_lp_advertise
);
850 an_disable_admin
= MLX5_GET(ptys_reg
, out
, an_disable_admin
);
851 an_status
= MLX5_GET(ptys_reg
, out
, an_status
);
853 ethtool_link_ksettings_zero_link_mode(link_ksettings
, supported
);
854 ethtool_link_ksettings_zero_link_mode(link_ksettings
, advertising
);
856 get_supported(eth_proto_cap
, link_ksettings
);
857 get_advertising(eth_proto_admin
, 0, 0, link_ksettings
);
858 get_speed_duplex(netdev
, eth_proto_oper
, link_ksettings
);
860 eth_proto_oper
= eth_proto_oper
? eth_proto_oper
: eth_proto_cap
;
862 link_ksettings
->base
.port
= get_connector_port(eth_proto_oper
);
863 get_lp_advertising(eth_proto_lp
, link_ksettings
);
865 if (an_status
== MLX5_AN_COMPLETE
)
866 ethtool_link_ksettings_add_link_mode(link_ksettings
,
867 lp_advertising
, Autoneg
);
869 link_ksettings
->base
.autoneg
= an_disable_admin
? AUTONEG_DISABLE
:
871 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
,
873 if (!an_disable_admin
)
874 ethtool_link_ksettings_add_link_mode(link_ksettings
,
875 advertising
, Autoneg
);
881 static u32
mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes
)
883 u32 i
, ptys_modes
= 0;
885 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
886 if (bitmap_intersects(ptys2ethtool_table
[i
].advertised
,
888 __ETHTOOL_LINK_MODE_MASK_NBITS
))
889 ptys_modes
|= MLX5E_PROT_MASK(i
);
895 static u32
mlx5e_ethtool2ptys_speed_link(u32 speed
)
897 u32 i
, speed_links
= 0;
899 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
900 if (ptys2ethtool_table
[i
].speed
== speed
)
901 speed_links
|= MLX5E_PROT_MASK(i
);
907 static int mlx5e_set_link_ksettings(struct net_device
*netdev
,
908 const struct ethtool_link_ksettings
*link_ksettings
)
910 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
911 struct mlx5_core_dev
*mdev
= priv
->mdev
;
912 u32 eth_proto_cap
, eth_proto_admin
;
913 bool an_changes
= false;
922 speed
= link_ksettings
->base
.speed
;
924 link_modes
= link_ksettings
->base
.autoneg
== AUTONEG_ENABLE
?
925 mlx5e_ethtool2ptys_adver_link(link_ksettings
->link_modes
.advertising
) :
926 mlx5e_ethtool2ptys_speed_link(speed
);
928 err
= mlx5_query_port_proto_cap(mdev
, ð_proto_cap
, MLX5_PTYS_EN
);
930 netdev_err(netdev
, "%s: query port eth proto cap failed: %d\n",
935 link_modes
= link_modes
& eth_proto_cap
;
937 netdev_err(netdev
, "%s: Not supported link mode(s) requested",
943 err
= mlx5_query_port_proto_admin(mdev
, ð_proto_admin
, MLX5_PTYS_EN
);
945 netdev_err(netdev
, "%s: query port eth proto admin failed: %d\n",
950 mlx5_query_port_autoneg(mdev
, MLX5_PTYS_EN
, &an_status
,
951 &an_disable_cap
, &an_disable_admin
);
953 an_disable
= link_ksettings
->base
.autoneg
== AUTONEG_DISABLE
;
954 an_changes
= ((!an_disable
&& an_disable_admin
) ||
955 (an_disable
&& !an_disable_admin
));
957 if (!an_changes
&& link_modes
== eth_proto_admin
)
960 mlx5_set_port_ptys(mdev
, an_disable
, link_modes
, MLX5_PTYS_EN
);
961 mlx5_toggle_port_link(mdev
);
967 static u32
mlx5e_get_rxfh_key_size(struct net_device
*netdev
)
969 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
971 return sizeof(priv
->params
.toeplitz_hash_key
);
974 static u32
mlx5e_get_rxfh_indir_size(struct net_device
*netdev
)
976 return MLX5E_INDIR_RQT_SIZE
;
979 static int mlx5e_get_rxfh(struct net_device
*netdev
, u32
*indir
, u8
*key
,
982 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
985 memcpy(indir
, priv
->params
.indirection_rqt
,
986 sizeof(priv
->params
.indirection_rqt
));
989 memcpy(key
, priv
->params
.toeplitz_hash_key
,
990 sizeof(priv
->params
.toeplitz_hash_key
));
993 *hfunc
= priv
->params
.rss_hfunc
;
998 static void mlx5e_modify_tirs_hash(struct mlx5e_priv
*priv
, void *in
, int inlen
)
1000 void *tirc
= MLX5_ADDR_OF(modify_tir_in
, in
, ctx
);
1001 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1002 int ctxlen
= MLX5_ST_SZ_BYTES(tirc
);
1005 MLX5_SET(modify_tir_in
, in
, bitmask
.hash
, 1);
1007 for (tt
= 0; tt
< MLX5E_NUM_INDIR_TIRS
; tt
++) {
1008 memset(tirc
, 0, ctxlen
);
1009 mlx5e_build_indir_tir_ctx_hash(priv
, tirc
, tt
);
1010 mlx5_core_modify_tir(mdev
, priv
->indir_tir
[tt
].tirn
, in
, inlen
);
1014 static int mlx5e_set_rxfh(struct net_device
*dev
, const u32
*indir
,
1015 const u8
*key
, const u8 hfunc
)
1017 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1018 int inlen
= MLX5_ST_SZ_BYTES(modify_tir_in
);
1019 bool hash_changed
= false;
1022 if ((hfunc
!= ETH_RSS_HASH_NO_CHANGE
) &&
1023 (hfunc
!= ETH_RSS_HASH_XOR
) &&
1024 (hfunc
!= ETH_RSS_HASH_TOP
))
1027 in
= mlx5_vzalloc(inlen
);
1031 mutex_lock(&priv
->state_lock
);
1034 u32 rqtn
= priv
->indir_rqt
.rqtn
;
1036 memcpy(priv
->params
.indirection_rqt
, indir
,
1037 sizeof(priv
->params
.indirection_rqt
));
1038 mlx5e_redirect_rqt(priv
, rqtn
, MLX5E_INDIR_RQT_SIZE
, 0);
1041 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&&
1042 hfunc
!= priv
->params
.rss_hfunc
) {
1043 priv
->params
.rss_hfunc
= hfunc
;
1044 hash_changed
= true;
1048 memcpy(priv
->params
.toeplitz_hash_key
, key
,
1049 sizeof(priv
->params
.toeplitz_hash_key
));
1050 hash_changed
= hash_changed
||
1051 priv
->params
.rss_hfunc
== ETH_RSS_HASH_TOP
;
1055 mlx5e_modify_tirs_hash(priv
, in
, inlen
);
1057 mutex_unlock(&priv
->state_lock
);
1064 static int mlx5e_get_rxnfc(struct net_device
*netdev
,
1065 struct ethtool_rxnfc
*info
, u32
*rule_locs
)
1067 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1070 switch (info
->cmd
) {
1071 case ETHTOOL_GRXRINGS
:
1072 info
->data
= priv
->params
.num_channels
;
1074 case ETHTOOL_GRXCLSRLCNT
:
1075 info
->rule_cnt
= priv
->fs
.ethtool
.tot_num_rules
;
1077 case ETHTOOL_GRXCLSRULE
:
1078 err
= mlx5e_ethtool_get_flow(priv
, info
, info
->fs
.location
);
1080 case ETHTOOL_GRXCLSRLALL
:
1081 err
= mlx5e_ethtool_get_all_flows(priv
, info
, rule_locs
);
1091 static int mlx5e_get_tunable(struct net_device
*dev
,
1092 const struct ethtool_tunable
*tuna
,
1095 const struct mlx5e_priv
*priv
= netdev_priv(dev
);
1099 case ETHTOOL_TX_COPYBREAK
:
1100 *(u32
*)data
= priv
->params
.tx_max_inline
;
1110 static int mlx5e_set_tunable(struct net_device
*dev
,
1111 const struct ethtool_tunable
*tuna
,
1114 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1115 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1121 case ETHTOOL_TX_COPYBREAK
:
1123 if (val
> mlx5e_get_max_inline_cap(mdev
)) {
1128 mutex_lock(&priv
->state_lock
);
1130 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1132 mlx5e_close_locked(dev
);
1134 priv
->params
.tx_max_inline
= val
;
1137 err
= mlx5e_open_locked(dev
);
1139 mutex_unlock(&priv
->state_lock
);
1149 static void mlx5e_get_pauseparam(struct net_device
*netdev
,
1150 struct ethtool_pauseparam
*pauseparam
)
1152 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1153 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1156 err
= mlx5_query_port_pause(mdev
, &pauseparam
->rx_pause
,
1157 &pauseparam
->tx_pause
);
1159 netdev_err(netdev
, "%s: mlx5_query_port_pause failed:0x%x\n",
1164 static int mlx5e_set_pauseparam(struct net_device
*netdev
,
1165 struct ethtool_pauseparam
*pauseparam
)
1167 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1168 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1171 if (pauseparam
->autoneg
)
1174 err
= mlx5_set_port_pause(mdev
,
1175 pauseparam
->rx_pause
? 1 : 0,
1176 pauseparam
->tx_pause
? 1 : 0);
1178 netdev_err(netdev
, "%s: mlx5_set_port_pause failed:0x%x\n",
1185 static int mlx5e_get_ts_info(struct net_device
*dev
,
1186 struct ethtool_ts_info
*info
)
1188 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1191 ret
= ethtool_op_get_ts_info(dev
, info
);
1195 info
->phc_index
= priv
->tstamp
.ptp
?
1196 ptp_clock_index(priv
->tstamp
.ptp
) : -1;
1198 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
1201 info
->so_timestamping
|= SOF_TIMESTAMPING_TX_HARDWARE
|
1202 SOF_TIMESTAMPING_RX_HARDWARE
|
1203 SOF_TIMESTAMPING_RAW_HARDWARE
;
1205 info
->tx_types
= (BIT(1) << HWTSTAMP_TX_OFF
) |
1206 (BIT(1) << HWTSTAMP_TX_ON
);
1208 info
->rx_filters
= (BIT(1) << HWTSTAMP_FILTER_NONE
) |
1209 (BIT(1) << HWTSTAMP_FILTER_ALL
);
1214 static __u32
mlx5e_get_wol_supported(struct mlx5_core_dev
*mdev
)
1218 if (MLX5_CAP_GEN(mdev
, wol_g
))
1221 if (MLX5_CAP_GEN(mdev
, wol_s
))
1222 ret
|= WAKE_MAGICSECURE
;
1224 if (MLX5_CAP_GEN(mdev
, wol_a
))
1227 if (MLX5_CAP_GEN(mdev
, wol_b
))
1230 if (MLX5_CAP_GEN(mdev
, wol_m
))
1233 if (MLX5_CAP_GEN(mdev
, wol_u
))
1236 if (MLX5_CAP_GEN(mdev
, wol_p
))
1242 static __u32
mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode
)
1246 if (mode
& MLX5_WOL_MAGIC
)
1249 if (mode
& MLX5_WOL_SECURED_MAGIC
)
1250 ret
|= WAKE_MAGICSECURE
;
1252 if (mode
& MLX5_WOL_ARP
)
1255 if (mode
& MLX5_WOL_BROADCAST
)
1258 if (mode
& MLX5_WOL_MULTICAST
)
1261 if (mode
& MLX5_WOL_UNICAST
)
1264 if (mode
& MLX5_WOL_PHY_ACTIVITY
)
1270 static u8
mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode
)
1274 if (mode
& WAKE_MAGIC
)
1275 ret
|= MLX5_WOL_MAGIC
;
1277 if (mode
& WAKE_MAGICSECURE
)
1278 ret
|= MLX5_WOL_SECURED_MAGIC
;
1280 if (mode
& WAKE_ARP
)
1281 ret
|= MLX5_WOL_ARP
;
1283 if (mode
& WAKE_BCAST
)
1284 ret
|= MLX5_WOL_BROADCAST
;
1286 if (mode
& WAKE_MCAST
)
1287 ret
|= MLX5_WOL_MULTICAST
;
1289 if (mode
& WAKE_UCAST
)
1290 ret
|= MLX5_WOL_UNICAST
;
1292 if (mode
& WAKE_PHY
)
1293 ret
|= MLX5_WOL_PHY_ACTIVITY
;
1298 static void mlx5e_get_wol(struct net_device
*netdev
,
1299 struct ethtool_wolinfo
*wol
)
1301 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1302 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1306 memset(wol
, 0, sizeof(*wol
));
1308 wol
->supported
= mlx5e_get_wol_supported(mdev
);
1309 if (!wol
->supported
)
1312 err
= mlx5_query_port_wol(mdev
, &mlx5_wol_mode
);
1316 wol
->wolopts
= mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode
);
1319 static int mlx5e_set_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
1321 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1322 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1323 __u32 wol_supported
= mlx5e_get_wol_supported(mdev
);
1329 if (wol
->wolopts
& ~wol_supported
)
1332 mlx5_wol_mode
= mlx5e_refomrat_wol_mode_linux_to_mlx5(wol
->wolopts
);
1334 return mlx5_set_port_wol(mdev
, mlx5_wol_mode
);
1337 static int mlx5e_set_phys_id(struct net_device
*dev
,
1338 enum ethtool_phys_id_state state
)
1340 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1341 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1342 u16 beacon_duration
;
1344 if (!MLX5_CAP_GEN(mdev
, beacon_led
))
1348 case ETHTOOL_ID_ACTIVE
:
1349 beacon_duration
= MLX5_BEACON_DURATION_INF
;
1351 case ETHTOOL_ID_INACTIVE
:
1352 beacon_duration
= MLX5_BEACON_DURATION_OFF
;
1358 return mlx5_set_port_beacon(mdev
, beacon_duration
);
1361 static int mlx5e_get_module_info(struct net_device
*netdev
,
1362 struct ethtool_modinfo
*modinfo
)
1364 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1365 struct mlx5_core_dev
*dev
= priv
->mdev
;
1369 size_read
= mlx5_query_module_eeprom(dev
, 0, 2, data
);
1373 /* data[0] = identifier byte */
1375 case MLX5_MODULE_ID_QSFP
:
1376 modinfo
->type
= ETH_MODULE_SFF_8436
;
1377 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1379 case MLX5_MODULE_ID_QSFP_PLUS
:
1380 case MLX5_MODULE_ID_QSFP28
:
1381 /* data[1] = revision id */
1382 if (data
[0] == MLX5_MODULE_ID_QSFP28
|| data
[1] >= 0x3) {
1383 modinfo
->type
= ETH_MODULE_SFF_8636
;
1384 modinfo
->eeprom_len
= ETH_MODULE_SFF_8636_LEN
;
1386 modinfo
->type
= ETH_MODULE_SFF_8436
;
1387 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1390 case MLX5_MODULE_ID_SFP
:
1391 modinfo
->type
= ETH_MODULE_SFF_8472
;
1392 modinfo
->eeprom_len
= ETH_MODULE_SFF_8472_LEN
;
1395 netdev_err(priv
->netdev
, "%s: cable type not recognized:0x%x\n",
1403 static int mlx5e_get_module_eeprom(struct net_device
*netdev
,
1404 struct ethtool_eeprom
*ee
,
1407 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1408 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1409 int offset
= ee
->offset
;
1416 memset(data
, 0, ee
->len
);
1418 while (i
< ee
->len
) {
1419 size_read
= mlx5_query_module_eeprom(mdev
, offset
, ee
->len
- i
,
1426 if (size_read
< 0) {
1427 netdev_err(priv
->netdev
, "%s: mlx5_query_eeprom failed:0x%x\n",
1428 __func__
, size_read
);
1433 offset
+= size_read
;
/* Handler signature used by mlx5e_handle_pflag() to apply a single private
 * flag; returns 0 on success or a negative errno.
 */
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
1441 static int set_pflag_rx_cqe_based_moder(struct net_device
*netdev
, bool enable
)
1443 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1444 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1445 bool rx_mode_changed
;
1446 u8 rx_cq_period_mode
;
1450 rx_cq_period_mode
= enable
?
1451 MLX5_CQ_PERIOD_MODE_START_FROM_CQE
:
1452 MLX5_CQ_PERIOD_MODE_START_FROM_EQE
;
1453 rx_mode_changed
= rx_cq_period_mode
!= priv
->params
.rx_cq_period_mode
;
1455 if (rx_cq_period_mode
== MLX5_CQ_PERIOD_MODE_START_FROM_CQE
&&
1456 !MLX5_CAP_GEN(mdev
, cq_period_start_from_cqe
))
1459 if (!rx_mode_changed
)
1462 reset
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1464 mlx5e_close_locked(netdev
);
1466 mlx5e_set_rx_cq_mode_params(&priv
->params
, rx_cq_period_mode
);
1469 err
= mlx5e_open_locked(netdev
);
1474 static int set_pflag_rx_cqe_compress(struct net_device
*netdev
,
1477 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1478 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1480 if (!MLX5_CAP_GEN(mdev
, cqe_compression
))
1483 if (enable
&& priv
->tstamp
.hwtstamp_config
.rx_filter
!= HWTSTAMP_FILTER_NONE
) {
1484 netdev_err(netdev
, "Can't enable cqe compression while timestamping is enabled.\n");
1488 mlx5e_modify_rx_cqe_compression_locked(priv
, enable
);
1489 priv
->params
.rx_cqe_compress_def
= enable
;
1490 mlx5e_set_rq_type_params(priv
, priv
->params
.rq_wq_type
);
1495 static int mlx5e_handle_pflag(struct net_device
*netdev
,
1497 enum mlx5e_priv_flag flag
,
1498 mlx5e_pflag_handler pflag_handler
)
1500 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1501 bool enable
= !!(wanted_flags
& flag
);
1502 u32 changes
= wanted_flags
^ priv
->params
.pflags
;
1505 if (!(changes
& flag
))
1508 err
= pflag_handler(netdev
, enable
);
1510 netdev_err(netdev
, "%s private flag 0x%x failed err %d\n",
1511 enable
? "Enable" : "Disable", flag
, err
);
1515 MLX5E_SET_PFLAG(priv
, flag
, enable
);
1519 static int mlx5e_set_priv_flags(struct net_device
*netdev
, u32 pflags
)
1521 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1524 mutex_lock(&priv
->state_lock
);
1525 err
= mlx5e_handle_pflag(netdev
, pflags
,
1526 MLX5E_PFLAG_RX_CQE_BASED_MODER
,
1527 set_pflag_rx_cqe_based_moder
);
1531 err
= mlx5e_handle_pflag(netdev
, pflags
,
1532 MLX5E_PFLAG_RX_CQE_COMPRESS
,
1533 set_pflag_rx_cqe_compress
);
1536 mutex_unlock(&priv
->state_lock
);
1540 static u32
mlx5e_get_priv_flags(struct net_device
*netdev
)
1542 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1544 return priv
->params
.pflags
;
1547 static int mlx5e_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
1550 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1553 case ETHTOOL_SRXCLSRLINS
:
1554 err
= mlx5e_ethtool_flow_replace(priv
, &cmd
->fs
);
1556 case ETHTOOL_SRXCLSRLDEL
:
1557 err
= mlx5e_ethtool_flow_remove(priv
, cmd
->fs
.location
);
1567 const struct ethtool_ops mlx5e_ethtool_ops
= {
1568 .get_drvinfo
= mlx5e_get_drvinfo
,
1569 .get_link
= ethtool_op_get_link
,
1570 .get_strings
= mlx5e_get_strings
,
1571 .get_sset_count
= mlx5e_get_sset_count
,
1572 .get_ethtool_stats
= mlx5e_get_ethtool_stats
,
1573 .get_ringparam
= mlx5e_get_ringparam
,
1574 .set_ringparam
= mlx5e_set_ringparam
,
1575 .get_channels
= mlx5e_get_channels
,
1576 .set_channels
= mlx5e_set_channels
,
1577 .get_coalesce
= mlx5e_get_coalesce
,
1578 .set_coalesce
= mlx5e_set_coalesce
,
1579 .get_link_ksettings
= mlx5e_get_link_ksettings
,
1580 .set_link_ksettings
= mlx5e_set_link_ksettings
,
1581 .get_rxfh_key_size
= mlx5e_get_rxfh_key_size
,
1582 .get_rxfh_indir_size
= mlx5e_get_rxfh_indir_size
,
1583 .get_rxfh
= mlx5e_get_rxfh
,
1584 .set_rxfh
= mlx5e_set_rxfh
,
1585 .get_rxnfc
= mlx5e_get_rxnfc
,
1586 .set_rxnfc
= mlx5e_set_rxnfc
,
1587 .get_tunable
= mlx5e_get_tunable
,
1588 .set_tunable
= mlx5e_set_tunable
,
1589 .get_pauseparam
= mlx5e_get_pauseparam
,
1590 .set_pauseparam
= mlx5e_set_pauseparam
,
1591 .get_ts_info
= mlx5e_get_ts_info
,
1592 .set_phys_id
= mlx5e_set_phys_id
,
1593 .get_wol
= mlx5e_get_wol
,
1594 .set_wol
= mlx5e_set_wol
,
1595 .get_module_info
= mlx5e_get_module_info
,
1596 .get_module_eeprom
= mlx5e_get_module_eeprom
,
1597 .get_priv_flags
= mlx5e_get_priv_flags
,
1598 .set_priv_flags
= mlx5e_set_priv_flags
,
1599 .self_test
= mlx5e_self_test
,