/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 static void mlx5e_get_drvinfo(struct net_device
*dev
,
36 struct ethtool_drvinfo
*drvinfo
)
38 struct mlx5e_priv
*priv
= netdev_priv(dev
);
39 struct mlx5_core_dev
*mdev
= priv
->mdev
;
41 strlcpy(drvinfo
->driver
, DRIVER_NAME
, sizeof(drvinfo
->driver
));
42 strlcpy(drvinfo
->version
, DRIVER_VERSION
" (" DRIVER_RELDATE
")",
43 sizeof(drvinfo
->version
));
44 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
46 fw_rev_maj(mdev
), fw_rev_min(mdev
), fw_rev_sub(mdev
),
48 strlcpy(drvinfo
->bus_info
, pci_name(mdev
->pdev
),
49 sizeof(drvinfo
->bus_info
));
52 struct ptys2ethtool_config
{
53 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported
);
54 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised
);
58 static struct ptys2ethtool_config ptys2ethtool_table
[MLX5E_LINK_MODES_NUMBER
];
/* Fill one ptys2ethtool_table[] entry: record the speed and set every
 * ethtool link-mode bit listed in the variadic arguments in both the
 * supported and advertised bitmaps.
 */
#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)               \
	({                                                              \
		struct ptys2ethtool_config *cfg;                        \
		const unsigned int modes[] = { __VA_ARGS__ };           \
		unsigned int i;                                         \
		cfg = &ptys2ethtool_table[reg_];                        \
		cfg->speed = speed_;                                    \
		bitmap_zero(cfg->supported,                             \
			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
		bitmap_zero(cfg->advertised,                            \
			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {             \
			__set_bit(modes[i], cfg->supported);            \
			__set_bit(modes[i], cfg->advertised);           \
		}                                                       \
	})
77 void mlx5e_build_ptys2ethtool_map(void)
79 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII
, SPEED_1000
,
80 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
);
81 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX
, SPEED_1000
,
82 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
);
83 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4
, SPEED_10000
,
84 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
);
85 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4
, SPEED_10000
,
86 ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
);
87 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR
, SPEED_10000
,
88 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
89 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2
, SPEED_20000
,
90 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
);
91 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4
, SPEED_40000
,
92 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
);
93 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4
, SPEED_40000
,
94 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
);
95 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4
, SPEED_56000
,
96 ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
);
97 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR
, SPEED_10000
,
98 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
99 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR
, SPEED_10000
,
100 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
101 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER
, SPEED_10000
,
102 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
);
103 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4
, SPEED_40000
,
104 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
);
105 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4
, SPEED_40000
,
106 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
);
107 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2
, SPEED_50000
,
108 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
);
109 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4
, SPEED_100000
,
110 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
);
111 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4
, SPEED_100000
,
112 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
);
113 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4
, SPEED_100000
,
114 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
);
115 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4
, SPEED_100000
,
116 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
);
117 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T
, SPEED_10000
,
118 ETHTOOL_LINK_MODE_10000baseT_Full_BIT
);
119 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR
, SPEED_25000
,
120 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
);
121 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR
, SPEED_25000
,
122 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
);
123 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR
, SPEED_25000
,
124 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
);
125 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2
, SPEED_50000
,
126 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
);
127 MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2
, SPEED_50000
,
128 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
);
131 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv
*priv
)
133 struct mlx5_core_dev
*mdev
= priv
->mdev
;
138 err
= mlx5_query_port_pfc(mdev
, &pfc_en_tx
, &pfc_en_rx
);
140 return err
? 0 : pfc_en_tx
| pfc_en_rx
;
143 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv
*priv
)
145 struct mlx5_core_dev
*mdev
= priv
->mdev
;
150 err
= mlx5_query_port_pause(mdev
, &rx_pause
, &tx_pause
);
152 return err
? false : rx_pause
| tx_pause
;
/* Q counters exist only when a queue counter was allocated. */
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
/* One RQ stats group per channel. */
#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
/* One SQ stats group per (channel, traffic class) pair. */
#define MLX5E_NUM_SQ_STATS(priv) \
	(NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
/* One PFC group per enabled priority, plus one "global" group when
 * global pause is on.
 */
#define MLX5E_NUM_PFC_COUNTERS(priv) \
	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
	  NUM_PPORT_PER_PRIO_PFC_COUNTERS)
163 static int mlx5e_get_sset_count(struct net_device
*dev
, int sset
)
165 struct mlx5e_priv
*priv
= netdev_priv(dev
);
169 return NUM_SW_COUNTERS
+
170 MLX5E_NUM_Q_CNTRS(priv
) +
171 NUM_VPORT_COUNTERS
+ NUM_PPORT_COUNTERS(priv
) +
172 NUM_PCIE_COUNTERS(priv
) +
173 MLX5E_NUM_RQ_STATS(priv
) +
174 MLX5E_NUM_SQ_STATS(priv
) +
175 MLX5E_NUM_PFC_COUNTERS(priv
) +
176 ARRAY_SIZE(mlx5e_pme_status_desc
) +
177 ARRAY_SIZE(mlx5e_pme_error_desc
);
179 case ETH_SS_PRIV_FLAGS
:
180 return ARRAY_SIZE(mlx5e_priv_flags
);
182 return mlx5e_self_test_num(priv
);
189 static void mlx5e_fill_stats_strings(struct mlx5e_priv
*priv
, uint8_t *data
)
191 int i
, j
, tc
, prio
, idx
= 0;
192 unsigned long pfc_combined
;
195 for (i
= 0; i
< NUM_SW_COUNTERS
; i
++)
196 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, sw_stats_desc
[i
].format
);
199 for (i
= 0; i
< MLX5E_NUM_Q_CNTRS(priv
); i
++)
200 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, q_stats_desc
[i
].format
);
203 for (i
= 0; i
< NUM_VPORT_COUNTERS
; i
++)
204 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
205 vport_stats_desc
[i
].format
);
208 for (i
= 0; i
< NUM_PPORT_802_3_COUNTERS
; i
++)
209 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
210 pport_802_3_stats_desc
[i
].format
);
212 for (i
= 0; i
< NUM_PPORT_2863_COUNTERS
; i
++)
213 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
214 pport_2863_stats_desc
[i
].format
);
216 for (i
= 0; i
< NUM_PPORT_2819_COUNTERS
; i
++)
217 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
218 pport_2819_stats_desc
[i
].format
);
220 for (i
= 0; i
< NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv
); i
++)
221 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
222 pport_phy_statistical_stats_desc
[i
].format
);
224 for (i
= 0; i
< NUM_PCIE_PERF_COUNTERS(priv
); i
++)
225 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
226 pcie_perf_stats_desc
[i
].format
);
228 for (prio
= 0; prio
< NUM_PPORT_PRIO
; prio
++) {
229 for (i
= 0; i
< NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS
; i
++)
230 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
231 pport_per_prio_traffic_stats_desc
[i
].format
, prio
);
234 pfc_combined
= mlx5e_query_pfc_combined(priv
);
235 for_each_set_bit(prio
, &pfc_combined
, NUM_PPORT_PRIO
) {
236 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
237 char pfc_string
[ETH_GSTRING_LEN
];
239 snprintf(pfc_string
, sizeof(pfc_string
), "prio%d", prio
);
240 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
241 pport_per_prio_pfc_stats_desc
[i
].format
, pfc_string
);
245 if (mlx5e_query_global_pause_combined(priv
)) {
246 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
247 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
248 pport_per_prio_pfc_stats_desc
[i
].format
, "global");
252 /* port module event counters */
253 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_status_desc
); i
++)
254 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, mlx5e_pme_status_desc
[i
].format
);
256 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_error_desc
); i
++)
257 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, mlx5e_pme_error_desc
[i
].format
);
259 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
262 /* per channel counters */
263 for (i
= 0; i
< priv
->channels
.num
; i
++)
264 for (j
= 0; j
< NUM_RQ_STATS
; j
++)
265 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
266 rq_stats_desc
[j
].format
, i
);
268 for (tc
= 0; tc
< priv
->channels
.params
.num_tc
; tc
++)
269 for (i
= 0; i
< priv
->channels
.num
; i
++)
270 for (j
= 0; j
< NUM_SQ_STATS
; j
++)
271 sprintf(data
+ (idx
++) * ETH_GSTRING_LEN
,
272 sq_stats_desc
[j
].format
,
273 priv
->channel_tc2txq
[i
][tc
]);
276 static void mlx5e_get_strings(struct net_device
*dev
,
277 uint32_t stringset
, uint8_t *data
)
279 struct mlx5e_priv
*priv
= netdev_priv(dev
);
283 case ETH_SS_PRIV_FLAGS
:
284 for (i
= 0; i
< ARRAY_SIZE(mlx5e_priv_flags
); i
++)
285 strcpy(data
+ i
* ETH_GSTRING_LEN
, mlx5e_priv_flags
[i
]);
289 for (i
= 0; i
< mlx5e_self_test_num(priv
); i
++)
290 strcpy(data
+ i
* ETH_GSTRING_LEN
,
291 mlx5e_self_tests
[i
]);
295 mlx5e_fill_stats_strings(priv
, data
);
300 static void mlx5e_get_ethtool_stats(struct net_device
*dev
,
301 struct ethtool_stats
*stats
, u64
*data
)
303 struct mlx5e_priv
*priv
= netdev_priv(dev
);
304 struct mlx5e_channels
*channels
;
305 struct mlx5_priv
*mlx5_priv
;
306 int i
, j
, tc
, prio
, idx
= 0;
307 unsigned long pfc_combined
;
312 mutex_lock(&priv
->state_lock
);
313 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
314 mlx5e_update_stats(priv
);
315 channels
= &priv
->channels
;
316 mutex_unlock(&priv
->state_lock
);
318 for (i
= 0; i
< NUM_SW_COUNTERS
; i
++)
319 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.sw
,
322 for (i
= 0; i
< MLX5E_NUM_Q_CNTRS(priv
); i
++)
323 data
[idx
++] = MLX5E_READ_CTR32_CPU(&priv
->stats
.qcnt
,
326 for (i
= 0; i
< NUM_VPORT_COUNTERS
; i
++)
327 data
[idx
++] = MLX5E_READ_CTR64_BE(priv
->stats
.vport
.query_vport_out
,
328 vport_stats_desc
, i
);
330 for (i
= 0; i
< NUM_PPORT_802_3_COUNTERS
; i
++)
331 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.IEEE_802_3_counters
,
332 pport_802_3_stats_desc
, i
);
334 for (i
= 0; i
< NUM_PPORT_2863_COUNTERS
; i
++)
335 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.RFC_2863_counters
,
336 pport_2863_stats_desc
, i
);
338 for (i
= 0; i
< NUM_PPORT_2819_COUNTERS
; i
++)
339 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.RFC_2819_counters
,
340 pport_2819_stats_desc
, i
);
342 for (i
= 0; i
< NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv
); i
++)
343 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.phy_statistical_counters
,
344 pport_phy_statistical_stats_desc
, i
);
346 for (i
= 0; i
< NUM_PCIE_PERF_COUNTERS(priv
); i
++)
347 data
[idx
++] = MLX5E_READ_CTR32_BE(&priv
->stats
.pcie
.pcie_perf_counters
,
348 pcie_perf_stats_desc
, i
);
350 for (prio
= 0; prio
< NUM_PPORT_PRIO
; prio
++) {
351 for (i
= 0; i
< NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS
; i
++)
352 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[prio
],
353 pport_per_prio_traffic_stats_desc
, i
);
356 pfc_combined
= mlx5e_query_pfc_combined(priv
);
357 for_each_set_bit(prio
, &pfc_combined
, NUM_PPORT_PRIO
) {
358 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
359 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[prio
],
360 pport_per_prio_pfc_stats_desc
, i
);
364 if (mlx5e_query_global_pause_combined(priv
)) {
365 for (i
= 0; i
< NUM_PPORT_PER_PRIO_PFC_COUNTERS
; i
++) {
366 data
[idx
++] = MLX5E_READ_CTR64_BE(&priv
->stats
.pport
.per_prio_counters
[0],
367 pport_per_prio_pfc_stats_desc
, i
);
371 /* port module event counters */
372 mlx5_priv
= &priv
->mdev
->priv
;
373 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_status_desc
); i
++)
374 data
[idx
++] = MLX5E_READ_CTR64_CPU(mlx5_priv
->pme_stats
.status_counters
,
375 mlx5e_pme_status_desc
, i
);
377 for (i
= 0; i
< ARRAY_SIZE(mlx5e_pme_error_desc
); i
++)
378 data
[idx
++] = MLX5E_READ_CTR64_CPU(mlx5_priv
->pme_stats
.error_counters
,
379 mlx5e_pme_error_desc
, i
);
381 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
384 /* per channel counters */
385 for (i
= 0; i
< channels
->num
; i
++)
386 for (j
= 0; j
< NUM_RQ_STATS
; j
++)
388 MLX5E_READ_CTR64_CPU(&channels
->c
[i
]->rq
.stats
,
391 for (tc
= 0; tc
< priv
->channels
.params
.num_tc
; tc
++)
392 for (i
= 0; i
< channels
->num
; i
++)
393 for (j
= 0; j
< NUM_SQ_STATS
; j
++)
394 data
[idx
++] = MLX5E_READ_CTR64_CPU(&channels
->c
[i
]->sq
[tc
].stats
,
398 static u32
mlx5e_rx_wqes_to_packets(struct mlx5e_priv
*priv
, int rq_wq_type
,
406 if (rq_wq_type
!= MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
409 stride_size
= 1 << priv
->channels
.params
.mpwqe_log_stride_sz
;
410 num_strides
= 1 << priv
->channels
.params
.mpwqe_log_num_strides
;
411 wqe_size
= stride_size
* num_strides
;
413 packets_per_wqe
= wqe_size
/
414 ALIGN(ETH_DATA_LEN
, stride_size
);
415 return (1 << (order_base_2(num_wqe
* packets_per_wqe
) - 1));
418 static u32
mlx5e_packets_to_rx_wqes(struct mlx5e_priv
*priv
, int rq_wq_type
,
427 if (rq_wq_type
!= MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
)
430 stride_size
= 1 << priv
->channels
.params
.mpwqe_log_stride_sz
;
431 num_strides
= 1 << priv
->channels
.params
.mpwqe_log_num_strides
;
432 wqe_size
= stride_size
* num_strides
;
434 num_packets
= (1 << order_base_2(num_packets
));
436 packets_per_wqe
= wqe_size
/
437 ALIGN(ETH_DATA_LEN
, stride_size
);
438 num_wqes
= DIV_ROUND_UP(num_packets
, packets_per_wqe
);
439 return 1 << (order_base_2(num_wqes
));
442 static void mlx5e_get_ringparam(struct net_device
*dev
,
443 struct ethtool_ringparam
*param
)
445 struct mlx5e_priv
*priv
= netdev_priv(dev
);
446 int rq_wq_type
= priv
->channels
.params
.rq_wq_type
;
448 param
->rx_max_pending
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
449 1 << mlx5_max_log_rq_size(rq_wq_type
));
450 param
->tx_max_pending
= 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
;
451 param
->rx_pending
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
452 1 << priv
->channels
.params
.log_rq_size
);
453 param
->tx_pending
= 1 << priv
->channels
.params
.log_sq_size
;
456 static int mlx5e_set_ringparam(struct net_device
*dev
,
457 struct ethtool_ringparam
*param
)
459 struct mlx5e_priv
*priv
= netdev_priv(dev
);
460 int rq_wq_type
= priv
->channels
.params
.rq_wq_type
;
461 struct mlx5e_channels new_channels
= {};
470 if (param
->rx_jumbo_pending
) {
471 netdev_info(dev
, "%s: rx_jumbo_pending not supported\n",
475 if (param
->rx_mini_pending
) {
476 netdev_info(dev
, "%s: rx_mini_pending not supported\n",
481 min_rq_size
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
482 1 << mlx5_min_log_rq_size(rq_wq_type
));
483 max_rq_size
= mlx5e_rx_wqes_to_packets(priv
, rq_wq_type
,
484 1 << mlx5_max_log_rq_size(rq_wq_type
));
485 rx_pending_wqes
= mlx5e_packets_to_rx_wqes(priv
, rq_wq_type
,
488 if (param
->rx_pending
< min_rq_size
) {
489 netdev_info(dev
, "%s: rx_pending (%d) < min (%d)\n",
490 __func__
, param
->rx_pending
,
494 if (param
->rx_pending
> max_rq_size
) {
495 netdev_info(dev
, "%s: rx_pending (%d) > max (%d)\n",
496 __func__
, param
->rx_pending
,
501 num_mtts
= MLX5E_REQUIRED_MTTS(rx_pending_wqes
);
502 if (priv
->channels
.params
.rq_wq_type
== MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ
&&
503 !MLX5E_VALID_NUM_MTTS(num_mtts
)) {
504 netdev_info(dev
, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
505 __func__
, param
->rx_pending
);
509 if (param
->tx_pending
< (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE
)) {
510 netdev_info(dev
, "%s: tx_pending (%d) < min (%d)\n",
511 __func__
, param
->tx_pending
,
512 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE
);
515 if (param
->tx_pending
> (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
)) {
516 netdev_info(dev
, "%s: tx_pending (%d) > max (%d)\n",
517 __func__
, param
->tx_pending
,
518 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE
);
522 log_rq_size
= order_base_2(rx_pending_wqes
);
523 log_sq_size
= order_base_2(param
->tx_pending
);
525 if (log_rq_size
== priv
->channels
.params
.log_rq_size
&&
526 log_sq_size
== priv
->channels
.params
.log_sq_size
)
529 mutex_lock(&priv
->state_lock
);
531 new_channels
.params
= priv
->channels
.params
;
532 new_channels
.params
.log_rq_size
= log_rq_size
;
533 new_channels
.params
.log_sq_size
= log_sq_size
;
535 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
536 priv
->channels
.params
= new_channels
.params
;
540 err
= mlx5e_open_channels(priv
, &new_channels
);
544 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
547 mutex_unlock(&priv
->state_lock
);
552 static void mlx5e_get_channels(struct net_device
*dev
,
553 struct ethtool_channels
*ch
)
555 struct mlx5e_priv
*priv
= netdev_priv(dev
);
557 ch
->max_combined
= priv
->profile
->max_nch(priv
->mdev
);
558 ch
->combined_count
= priv
->channels
.params
.num_channels
;
561 static int mlx5e_set_channels(struct net_device
*dev
,
562 struct ethtool_channels
*ch
)
564 struct mlx5e_priv
*priv
= netdev_priv(dev
);
565 unsigned int count
= ch
->combined_count
;
566 struct mlx5e_channels new_channels
= {};
571 netdev_info(dev
, "%s: combined_count=0 not supported\n",
576 if (priv
->channels
.params
.num_channels
== count
)
579 mutex_lock(&priv
->state_lock
);
581 new_channels
.params
= priv
->channels
.params
;
582 new_channels
.params
.num_channels
= count
;
583 mlx5e_build_default_indir_rqt(priv
->mdev
, new_channels
.params
.indirection_rqt
,
584 MLX5E_INDIR_RQT_SIZE
, count
);
586 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
587 priv
->channels
.params
= new_channels
.params
;
591 /* Create fresh channels with new parameters */
592 err
= mlx5e_open_channels(priv
, &new_channels
);
596 arfs_enabled
= dev
->features
& NETIF_F_NTUPLE
;
598 mlx5e_arfs_disable(priv
);
600 /* Switch to new channels, set new parameters and close old ones */
601 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
604 err
= mlx5e_arfs_enable(priv
);
606 netdev_err(dev
, "%s: mlx5e_arfs_enable failed: %d\n",
611 mutex_unlock(&priv
->state_lock
);
616 static int mlx5e_get_coalesce(struct net_device
*netdev
,
617 struct ethtool_coalesce
*coal
)
619 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
621 if (!MLX5_CAP_GEN(priv
->mdev
, cq_moderation
))
624 coal
->rx_coalesce_usecs
= priv
->channels
.params
.rx_cq_moderation
.usec
;
625 coal
->rx_max_coalesced_frames
= priv
->channels
.params
.rx_cq_moderation
.pkts
;
626 coal
->tx_coalesce_usecs
= priv
->channels
.params
.tx_cq_moderation
.usec
;
627 coal
->tx_max_coalesced_frames
= priv
->channels
.params
.tx_cq_moderation
.pkts
;
628 coal
->use_adaptive_rx_coalesce
= priv
->channels
.params
.rx_am_enabled
;
634 mlx5e_set_priv_channels_coalesce(struct mlx5e_priv
*priv
, struct ethtool_coalesce
*coal
)
636 struct mlx5_core_dev
*mdev
= priv
->mdev
;
640 for (i
= 0; i
< priv
->channels
.num
; ++i
) {
641 struct mlx5e_channel
*c
= priv
->channels
.c
[i
];
643 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
644 mlx5_core_modify_cq_moderation(mdev
,
646 coal
->tx_coalesce_usecs
,
647 coal
->tx_max_coalesced_frames
);
650 mlx5_core_modify_cq_moderation(mdev
, &c
->rq
.cq
.mcq
,
651 coal
->rx_coalesce_usecs
,
652 coal
->rx_max_coalesced_frames
);
656 static int mlx5e_set_coalesce(struct net_device
*netdev
,
657 struct ethtool_coalesce
*coal
)
659 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
660 struct mlx5_core_dev
*mdev
= priv
->mdev
;
661 struct mlx5e_channels new_channels
= {};
665 if (!MLX5_CAP_GEN(mdev
, cq_moderation
))
668 mutex_lock(&priv
->state_lock
);
669 new_channels
.params
= priv
->channels
.params
;
671 new_channels
.params
.tx_cq_moderation
.usec
= coal
->tx_coalesce_usecs
;
672 new_channels
.params
.tx_cq_moderation
.pkts
= coal
->tx_max_coalesced_frames
;
673 new_channels
.params
.rx_cq_moderation
.usec
= coal
->rx_coalesce_usecs
;
674 new_channels
.params
.rx_cq_moderation
.pkts
= coal
->rx_max_coalesced_frames
;
675 new_channels
.params
.rx_am_enabled
= !!coal
->use_adaptive_rx_coalesce
;
677 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
678 priv
->channels
.params
= new_channels
.params
;
683 reset
= !!coal
->use_adaptive_rx_coalesce
!= priv
->channels
.params
.rx_am_enabled
;
685 mlx5e_set_priv_channels_coalesce(priv
, coal
);
686 priv
->channels
.params
= new_channels
.params
;
690 /* open fresh channels with new coal parameters */
691 err
= mlx5e_open_channels(priv
, &new_channels
);
695 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
698 mutex_unlock(&priv
->state_lock
);
702 static void ptys2ethtool_supported_link(unsigned long *supported_modes
,
705 unsigned long proto_cap
= eth_proto_cap
;
708 for_each_set_bit(proto
, &proto_cap
, MLX5E_LINK_MODES_NUMBER
)
709 bitmap_or(supported_modes
, supported_modes
,
710 ptys2ethtool_table
[proto
].supported
,
711 __ETHTOOL_LINK_MODE_MASK_NBITS
);
714 static void ptys2ethtool_adver_link(unsigned long *advertising_modes
,
717 unsigned long proto_cap
= eth_proto_cap
;
720 for_each_set_bit(proto
, &proto_cap
, MLX5E_LINK_MODES_NUMBER
)
721 bitmap_or(advertising_modes
, advertising_modes
,
722 ptys2ethtool_table
[proto
].advertised
,
723 __ETHTOOL_LINK_MODE_MASK_NBITS
);
726 static void ptys2ethtool_supported_port(struct ethtool_link_ksettings
*link_ksettings
,
729 if (eth_proto_cap
& (MLX5E_PROT_MASK(MLX5E_10GBASE_CR
)
730 | MLX5E_PROT_MASK(MLX5E_10GBASE_SR
)
731 | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4
)
732 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4
)
733 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4
)
734 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII
))) {
735 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, FIBRE
);
738 if (eth_proto_cap
& (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4
)
739 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4
)
740 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR
)
741 | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4
)
742 | MLX5E_PROT_MASK(MLX5E_1000BASE_KX
))) {
743 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Backplane
);
747 int mlx5e_get_max_linkspeed(struct mlx5_core_dev
*mdev
, u32
*speed
)
754 err
= mlx5_query_port_proto_cap(mdev
, &proto_cap
, MLX5_PTYS_EN
);
758 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
)
759 if (proto_cap
& MLX5E_PROT_MASK(i
))
760 max_speed
= max(max_speed
, ptys2ethtool_table
[i
].speed
);
766 static void get_speed_duplex(struct net_device
*netdev
,
768 struct ethtool_link_ksettings
*link_ksettings
)
771 u32 speed
= SPEED_UNKNOWN
;
772 u8 duplex
= DUPLEX_UNKNOWN
;
774 if (!netif_carrier_ok(netdev
))
777 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
778 if (eth_proto_oper
& MLX5E_PROT_MASK(i
)) {
779 speed
= ptys2ethtool_table
[i
].speed
;
780 duplex
= DUPLEX_FULL
;
785 link_ksettings
->base
.speed
= speed
;
786 link_ksettings
->base
.duplex
= duplex
;
789 static void get_supported(u32 eth_proto_cap
,
790 struct ethtool_link_ksettings
*link_ksettings
)
792 unsigned long *supported
= link_ksettings
->link_modes
.supported
;
794 ptys2ethtool_supported_port(link_ksettings
, eth_proto_cap
);
795 ptys2ethtool_supported_link(supported
, eth_proto_cap
);
796 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Pause
);
797 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
, Asym_Pause
);
800 static void get_advertising(u32 eth_proto_cap
, u8 tx_pause
,
802 struct ethtool_link_ksettings
*link_ksettings
)
804 unsigned long *advertising
= link_ksettings
->link_modes
.advertising
;
806 ptys2ethtool_adver_link(advertising
, eth_proto_cap
);
808 ethtool_link_ksettings_add_link_mode(link_ksettings
, advertising
, Pause
);
809 if (tx_pause
^ rx_pause
)
810 ethtool_link_ksettings_add_link_mode(link_ksettings
, advertising
, Asym_Pause
);
813 static u8
get_connector_port(u32 eth_proto
)
815 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_10GBASE_SR
)
816 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4
)
817 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4
)
818 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII
))) {
822 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4
)
823 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR
)
824 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4
))) {
828 if (eth_proto
& (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4
)
829 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR
)
830 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4
)
831 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4
))) {
838 static void get_lp_advertising(u32 eth_proto_lp
,
839 struct ethtool_link_ksettings
*link_ksettings
)
841 unsigned long *lp_advertising
= link_ksettings
->link_modes
.lp_advertising
;
843 ptys2ethtool_adver_link(lp_advertising
, eth_proto_lp
);
846 static int mlx5e_get_link_ksettings(struct net_device
*netdev
,
847 struct ethtool_link_ksettings
*link_ksettings
)
849 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
850 struct mlx5_core_dev
*mdev
= priv
->mdev
;
851 u32 out
[MLX5_ST_SZ_DW(ptys_reg
)] = {0};
860 err
= mlx5_query_port_ptys(mdev
, out
, sizeof(out
), MLX5_PTYS_EN
, 1);
862 netdev_err(netdev
, "%s: query port ptys failed: %d\n",
867 eth_proto_cap
= MLX5_GET(ptys_reg
, out
, eth_proto_capability
);
868 eth_proto_admin
= MLX5_GET(ptys_reg
, out
, eth_proto_admin
);
869 eth_proto_oper
= MLX5_GET(ptys_reg
, out
, eth_proto_oper
);
870 eth_proto_lp
= MLX5_GET(ptys_reg
, out
, eth_proto_lp_advertise
);
871 an_disable_admin
= MLX5_GET(ptys_reg
, out
, an_disable_admin
);
872 an_status
= MLX5_GET(ptys_reg
, out
, an_status
);
874 ethtool_link_ksettings_zero_link_mode(link_ksettings
, supported
);
875 ethtool_link_ksettings_zero_link_mode(link_ksettings
, advertising
);
877 get_supported(eth_proto_cap
, link_ksettings
);
878 get_advertising(eth_proto_admin
, 0, 0, link_ksettings
);
879 get_speed_duplex(netdev
, eth_proto_oper
, link_ksettings
);
881 eth_proto_oper
= eth_proto_oper
? eth_proto_oper
: eth_proto_cap
;
883 link_ksettings
->base
.port
= get_connector_port(eth_proto_oper
);
884 get_lp_advertising(eth_proto_lp
, link_ksettings
);
886 if (an_status
== MLX5_AN_COMPLETE
)
887 ethtool_link_ksettings_add_link_mode(link_ksettings
,
888 lp_advertising
, Autoneg
);
890 link_ksettings
->base
.autoneg
= an_disable_admin
? AUTONEG_DISABLE
:
892 ethtool_link_ksettings_add_link_mode(link_ksettings
, supported
,
894 if (!an_disable_admin
)
895 ethtool_link_ksettings_add_link_mode(link_ksettings
,
896 advertising
, Autoneg
);
902 static u32
mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes
)
904 u32 i
, ptys_modes
= 0;
906 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
907 if (bitmap_intersects(ptys2ethtool_table
[i
].advertised
,
909 __ETHTOOL_LINK_MODE_MASK_NBITS
))
910 ptys_modes
|= MLX5E_PROT_MASK(i
);
916 static u32
mlx5e_ethtool2ptys_speed_link(u32 speed
)
918 u32 i
, speed_links
= 0;
920 for (i
= 0; i
< MLX5E_LINK_MODES_NUMBER
; ++i
) {
921 if (ptys2ethtool_table
[i
].speed
== speed
)
922 speed_links
|= MLX5E_PROT_MASK(i
);
928 static int mlx5e_set_link_ksettings(struct net_device
*netdev
,
929 const struct ethtool_link_ksettings
*link_ksettings
)
931 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
932 struct mlx5_core_dev
*mdev
= priv
->mdev
;
933 u32 eth_proto_cap
, eth_proto_admin
;
934 bool an_changes
= false;
943 speed
= link_ksettings
->base
.speed
;
945 link_modes
= link_ksettings
->base
.autoneg
== AUTONEG_ENABLE
?
946 mlx5e_ethtool2ptys_adver_link(link_ksettings
->link_modes
.advertising
) :
947 mlx5e_ethtool2ptys_speed_link(speed
);
949 err
= mlx5_query_port_proto_cap(mdev
, ð_proto_cap
, MLX5_PTYS_EN
);
951 netdev_err(netdev
, "%s: query port eth proto cap failed: %d\n",
956 link_modes
= link_modes
& eth_proto_cap
;
958 netdev_err(netdev
, "%s: Not supported link mode(s) requested",
964 err
= mlx5_query_port_proto_admin(mdev
, ð_proto_admin
, MLX5_PTYS_EN
);
966 netdev_err(netdev
, "%s: query port eth proto admin failed: %d\n",
971 mlx5_query_port_autoneg(mdev
, MLX5_PTYS_EN
, &an_status
,
972 &an_disable_cap
, &an_disable_admin
);
974 an_disable
= link_ksettings
->base
.autoneg
== AUTONEG_DISABLE
;
975 an_changes
= ((!an_disable
&& an_disable_admin
) ||
976 (an_disable
&& !an_disable_admin
));
978 if (!an_changes
&& link_modes
== eth_proto_admin
)
981 mlx5_set_port_ptys(mdev
, an_disable
, link_modes
, MLX5_PTYS_EN
);
982 mlx5_toggle_port_link(mdev
);
988 static u32
mlx5e_get_rxfh_key_size(struct net_device
*netdev
)
990 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
992 return sizeof(priv
->channels
.params
.toeplitz_hash_key
);
995 static u32
mlx5e_get_rxfh_indir_size(struct net_device
*netdev
)
997 return MLX5E_INDIR_RQT_SIZE
;
1000 static int mlx5e_get_rxfh(struct net_device
*netdev
, u32
*indir
, u8
*key
,
1003 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1006 memcpy(indir
, priv
->channels
.params
.indirection_rqt
,
1007 sizeof(priv
->channels
.params
.indirection_rqt
));
1010 memcpy(key
, priv
->channels
.params
.toeplitz_hash_key
,
1011 sizeof(priv
->channels
.params
.toeplitz_hash_key
));
1014 *hfunc
= priv
->channels
.params
.rss_hfunc
;
1019 static void mlx5e_modify_tirs_hash(struct mlx5e_priv
*priv
, void *in
, int inlen
)
1021 void *tirc
= MLX5_ADDR_OF(modify_tir_in
, in
, ctx
);
1022 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1023 int ctxlen
= MLX5_ST_SZ_BYTES(tirc
);
1026 MLX5_SET(modify_tir_in
, in
, bitmask
.hash
, 1);
1028 for (tt
= 0; tt
< MLX5E_NUM_INDIR_TIRS
; tt
++) {
1029 memset(tirc
, 0, ctxlen
);
1030 mlx5e_build_indir_tir_ctx_hash(&priv
->channels
.params
, tt
, tirc
);
1031 mlx5_core_modify_tir(mdev
, priv
->indir_tir
[tt
].tirn
, in
, inlen
);
1035 static int mlx5e_set_rxfh(struct net_device
*dev
, const u32
*indir
,
1036 const u8
*key
, const u8 hfunc
)
1038 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1039 int inlen
= MLX5_ST_SZ_BYTES(modify_tir_in
);
1040 bool hash_changed
= false;
1043 if ((hfunc
!= ETH_RSS_HASH_NO_CHANGE
) &&
1044 (hfunc
!= ETH_RSS_HASH_XOR
) &&
1045 (hfunc
!= ETH_RSS_HASH_TOP
))
1048 in
= mlx5_vzalloc(inlen
);
1052 mutex_lock(&priv
->state_lock
);
1054 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&&
1055 hfunc
!= priv
->channels
.params
.rss_hfunc
) {
1056 priv
->channels
.params
.rss_hfunc
= hfunc
;
1057 hash_changed
= true;
1061 memcpy(priv
->channels
.params
.indirection_rqt
, indir
,
1062 sizeof(priv
->channels
.params
.indirection_rqt
));
1064 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
1065 u32 rqtn
= priv
->indir_rqt
.rqtn
;
1066 struct mlx5e_redirect_rqt_param rrp
= {
1070 .hfunc
= priv
->channels
.params
.rss_hfunc
,
1071 .channels
= &priv
->channels
,
1076 mlx5e_redirect_rqt(priv
, rqtn
, MLX5E_INDIR_RQT_SIZE
, rrp
);
1081 memcpy(priv
->channels
.params
.toeplitz_hash_key
, key
,
1082 sizeof(priv
->channels
.params
.toeplitz_hash_key
));
1083 hash_changed
= hash_changed
||
1084 priv
->channels
.params
.rss_hfunc
== ETH_RSS_HASH_TOP
;
1088 mlx5e_modify_tirs_hash(priv
, in
, inlen
);
1090 mutex_unlock(&priv
->state_lock
);
1097 static int mlx5e_get_rxnfc(struct net_device
*netdev
,
1098 struct ethtool_rxnfc
*info
, u32
*rule_locs
)
1100 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1103 switch (info
->cmd
) {
1104 case ETHTOOL_GRXRINGS
:
1105 info
->data
= priv
->channels
.params
.num_channels
;
1107 case ETHTOOL_GRXCLSRLCNT
:
1108 info
->rule_cnt
= priv
->fs
.ethtool
.tot_num_rules
;
1110 case ETHTOOL_GRXCLSRULE
:
1111 err
= mlx5e_ethtool_get_flow(priv
, info
, info
->fs
.location
);
1113 case ETHTOOL_GRXCLSRLALL
:
1114 err
= mlx5e_ethtool_get_all_flows(priv
, info
, rule_locs
);
1124 static int mlx5e_get_tunable(struct net_device
*dev
,
1125 const struct ethtool_tunable
*tuna
,
1128 const struct mlx5e_priv
*priv
= netdev_priv(dev
);
1132 case ETHTOOL_TX_COPYBREAK
:
1133 *(u32
*)data
= priv
->channels
.params
.tx_max_inline
;
1143 static int mlx5e_set_tunable(struct net_device
*dev
,
1144 const struct ethtool_tunable
*tuna
,
1147 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1148 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1149 struct mlx5e_channels new_channels
= {};
1153 mutex_lock(&priv
->state_lock
);
1156 case ETHTOOL_TX_COPYBREAK
:
1158 if (val
> mlx5e_get_max_inline_cap(mdev
)) {
1163 new_channels
.params
= priv
->channels
.params
;
1164 new_channels
.params
.tx_max_inline
= val
;
1166 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
1167 priv
->channels
.params
= new_channels
.params
;
1171 err
= mlx5e_open_channels(priv
, &new_channels
);
1174 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
1182 mutex_unlock(&priv
->state_lock
);
1186 static void mlx5e_get_pauseparam(struct net_device
*netdev
,
1187 struct ethtool_pauseparam
*pauseparam
)
1189 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1190 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1193 err
= mlx5_query_port_pause(mdev
, &pauseparam
->rx_pause
,
1194 &pauseparam
->tx_pause
);
1196 netdev_err(netdev
, "%s: mlx5_query_port_pause failed:0x%x\n",
1201 static int mlx5e_set_pauseparam(struct net_device
*netdev
,
1202 struct ethtool_pauseparam
*pauseparam
)
1204 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1205 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1208 if (pauseparam
->autoneg
)
1211 err
= mlx5_set_port_pause(mdev
,
1212 pauseparam
->rx_pause
? 1 : 0,
1213 pauseparam
->tx_pause
? 1 : 0);
1215 netdev_err(netdev
, "%s: mlx5_set_port_pause failed:0x%x\n",
1222 static int mlx5e_get_ts_info(struct net_device
*dev
,
1223 struct ethtool_ts_info
*info
)
1225 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1228 ret
= ethtool_op_get_ts_info(dev
, info
);
1232 info
->phc_index
= priv
->tstamp
.ptp
?
1233 ptp_clock_index(priv
->tstamp
.ptp
) : -1;
1235 if (!MLX5_CAP_GEN(priv
->mdev
, device_frequency_khz
))
1238 info
->so_timestamping
|= SOF_TIMESTAMPING_TX_HARDWARE
|
1239 SOF_TIMESTAMPING_RX_HARDWARE
|
1240 SOF_TIMESTAMPING_RAW_HARDWARE
;
1242 info
->tx_types
= (BIT(1) << HWTSTAMP_TX_OFF
) |
1243 (BIT(1) << HWTSTAMP_TX_ON
);
1245 info
->rx_filters
= (BIT(1) << HWTSTAMP_FILTER_NONE
) |
1246 (BIT(1) << HWTSTAMP_FILTER_ALL
);
1251 static __u32
mlx5e_get_wol_supported(struct mlx5_core_dev
*mdev
)
1255 if (MLX5_CAP_GEN(mdev
, wol_g
))
1258 if (MLX5_CAP_GEN(mdev
, wol_s
))
1259 ret
|= WAKE_MAGICSECURE
;
1261 if (MLX5_CAP_GEN(mdev
, wol_a
))
1264 if (MLX5_CAP_GEN(mdev
, wol_b
))
1267 if (MLX5_CAP_GEN(mdev
, wol_m
))
1270 if (MLX5_CAP_GEN(mdev
, wol_u
))
1273 if (MLX5_CAP_GEN(mdev
, wol_p
))
1279 static __u32
mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode
)
1283 if (mode
& MLX5_WOL_MAGIC
)
1286 if (mode
& MLX5_WOL_SECURED_MAGIC
)
1287 ret
|= WAKE_MAGICSECURE
;
1289 if (mode
& MLX5_WOL_ARP
)
1292 if (mode
& MLX5_WOL_BROADCAST
)
1295 if (mode
& MLX5_WOL_MULTICAST
)
1298 if (mode
& MLX5_WOL_UNICAST
)
1301 if (mode
& MLX5_WOL_PHY_ACTIVITY
)
1307 static u8
mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode
)
1311 if (mode
& WAKE_MAGIC
)
1312 ret
|= MLX5_WOL_MAGIC
;
1314 if (mode
& WAKE_MAGICSECURE
)
1315 ret
|= MLX5_WOL_SECURED_MAGIC
;
1317 if (mode
& WAKE_ARP
)
1318 ret
|= MLX5_WOL_ARP
;
1320 if (mode
& WAKE_BCAST
)
1321 ret
|= MLX5_WOL_BROADCAST
;
1323 if (mode
& WAKE_MCAST
)
1324 ret
|= MLX5_WOL_MULTICAST
;
1326 if (mode
& WAKE_UCAST
)
1327 ret
|= MLX5_WOL_UNICAST
;
1329 if (mode
& WAKE_PHY
)
1330 ret
|= MLX5_WOL_PHY_ACTIVITY
;
1335 static void mlx5e_get_wol(struct net_device
*netdev
,
1336 struct ethtool_wolinfo
*wol
)
1338 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1339 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1343 memset(wol
, 0, sizeof(*wol
));
1345 wol
->supported
= mlx5e_get_wol_supported(mdev
);
1346 if (!wol
->supported
)
1349 err
= mlx5_query_port_wol(mdev
, &mlx5_wol_mode
);
1353 wol
->wolopts
= mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode
);
1356 static int mlx5e_set_wol(struct net_device
*netdev
, struct ethtool_wolinfo
*wol
)
1358 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1359 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1360 __u32 wol_supported
= mlx5e_get_wol_supported(mdev
);
1366 if (wol
->wolopts
& ~wol_supported
)
1369 mlx5_wol_mode
= mlx5e_refomrat_wol_mode_linux_to_mlx5(wol
->wolopts
);
1371 return mlx5_set_port_wol(mdev
, mlx5_wol_mode
);
1374 static int mlx5e_set_phys_id(struct net_device
*dev
,
1375 enum ethtool_phys_id_state state
)
1377 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1378 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1379 u16 beacon_duration
;
1381 if (!MLX5_CAP_GEN(mdev
, beacon_led
))
1385 case ETHTOOL_ID_ACTIVE
:
1386 beacon_duration
= MLX5_BEACON_DURATION_INF
;
1388 case ETHTOOL_ID_INACTIVE
:
1389 beacon_duration
= MLX5_BEACON_DURATION_OFF
;
1395 return mlx5_set_port_beacon(mdev
, beacon_duration
);
1398 static int mlx5e_get_module_info(struct net_device
*netdev
,
1399 struct ethtool_modinfo
*modinfo
)
1401 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1402 struct mlx5_core_dev
*dev
= priv
->mdev
;
1406 size_read
= mlx5_query_module_eeprom(dev
, 0, 2, data
);
1410 /* data[0] = identifier byte */
1412 case MLX5_MODULE_ID_QSFP
:
1413 modinfo
->type
= ETH_MODULE_SFF_8436
;
1414 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1416 case MLX5_MODULE_ID_QSFP_PLUS
:
1417 case MLX5_MODULE_ID_QSFP28
:
1418 /* data[1] = revision id */
1419 if (data
[0] == MLX5_MODULE_ID_QSFP28
|| data
[1] >= 0x3) {
1420 modinfo
->type
= ETH_MODULE_SFF_8636
;
1421 modinfo
->eeprom_len
= ETH_MODULE_SFF_8636_LEN
;
1423 modinfo
->type
= ETH_MODULE_SFF_8436
;
1424 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
1427 case MLX5_MODULE_ID_SFP
:
1428 modinfo
->type
= ETH_MODULE_SFF_8472
;
1429 modinfo
->eeprom_len
= ETH_MODULE_SFF_8472_LEN
;
1432 netdev_err(priv
->netdev
, "%s: cable type not recognized:0x%x\n",
1440 static int mlx5e_get_module_eeprom(struct net_device
*netdev
,
1441 struct ethtool_eeprom
*ee
,
1444 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1445 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1446 int offset
= ee
->offset
;
1453 memset(data
, 0, ee
->len
);
1455 while (i
< ee
->len
) {
1456 size_read
= mlx5_query_module_eeprom(mdev
, offset
, ee
->len
- i
,
1463 if (size_read
< 0) {
1464 netdev_err(priv
->netdev
, "%s: mlx5_query_eeprom failed:0x%x\n",
1465 __func__
, size_read
);
1470 offset
+= size_read
;
1476 typedef int (*mlx5e_pflag_handler
)(struct net_device
*netdev
, bool enable
);
1478 static int set_pflag_rx_cqe_based_moder(struct net_device
*netdev
, bool enable
)
1480 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1481 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1482 struct mlx5e_channels new_channels
= {};
1483 bool rx_mode_changed
;
1484 u8 rx_cq_period_mode
;
1487 rx_cq_period_mode
= enable
?
1488 MLX5_CQ_PERIOD_MODE_START_FROM_CQE
:
1489 MLX5_CQ_PERIOD_MODE_START_FROM_EQE
;
1490 rx_mode_changed
= rx_cq_period_mode
!= priv
->channels
.params
.rx_cq_period_mode
;
1492 if (rx_cq_period_mode
== MLX5_CQ_PERIOD_MODE_START_FROM_CQE
&&
1493 !MLX5_CAP_GEN(mdev
, cq_period_start_from_cqe
))
1496 if (!rx_mode_changed
)
1499 new_channels
.params
= priv
->channels
.params
;
1500 mlx5e_set_rx_cq_mode_params(&new_channels
.params
, rx_cq_period_mode
);
1502 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
1503 priv
->channels
.params
= new_channels
.params
;
1507 err
= mlx5e_open_channels(priv
, &new_channels
);
1511 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
1515 int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv
*priv
, bool new_val
)
1517 bool curr_val
= MLX5E_GET_PFLAG(&priv
->channels
.params
, MLX5E_PFLAG_RX_CQE_COMPRESS
);
1518 struct mlx5e_channels new_channels
= {};
1521 if (!MLX5_CAP_GEN(priv
->mdev
, cqe_compression
))
1522 return new_val
? -EOPNOTSUPP
: 0;
1524 if (curr_val
== new_val
)
1527 new_channels
.params
= priv
->channels
.params
;
1528 MLX5E_SET_PFLAG(&new_channels
.params
, MLX5E_PFLAG_RX_CQE_COMPRESS
, new_val
);
1530 mlx5e_set_rq_type_params(priv
->mdev
, &new_channels
.params
,
1531 new_channels
.params
.rq_wq_type
);
1533 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
1534 priv
->channels
.params
= new_channels
.params
;
1538 err
= mlx5e_open_channels(priv
, &new_channels
);
1542 mlx5e_switch_priv_channels(priv
, &new_channels
, NULL
);
1546 static int set_pflag_rx_cqe_compress(struct net_device
*netdev
,
1549 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1550 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1552 if (!MLX5_CAP_GEN(mdev
, cqe_compression
))
1555 if (enable
&& priv
->tstamp
.hwtstamp_config
.rx_filter
!= HWTSTAMP_FILTER_NONE
) {
1556 netdev_err(netdev
, "Can't enable cqe compression while timestamping is enabled.\n");
1560 mlx5e_modify_rx_cqe_compression_locked(priv
, enable
);
1561 priv
->channels
.params
.rx_cqe_compress_def
= enable
;
1566 static int mlx5e_handle_pflag(struct net_device
*netdev
,
1568 enum mlx5e_priv_flag flag
,
1569 mlx5e_pflag_handler pflag_handler
)
1571 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1572 bool enable
= !!(wanted_flags
& flag
);
1573 u32 changes
= wanted_flags
^ priv
->channels
.params
.pflags
;
1576 if (!(changes
& flag
))
1579 err
= pflag_handler(netdev
, enable
);
1581 netdev_err(netdev
, "%s private flag 0x%x failed err %d\n",
1582 enable
? "Enable" : "Disable", flag
, err
);
1586 MLX5E_SET_PFLAG(&priv
->channels
.params
, flag
, enable
);
1590 static int mlx5e_set_priv_flags(struct net_device
*netdev
, u32 pflags
)
1592 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1595 mutex_lock(&priv
->state_lock
);
1596 err
= mlx5e_handle_pflag(netdev
, pflags
,
1597 MLX5E_PFLAG_RX_CQE_BASED_MODER
,
1598 set_pflag_rx_cqe_based_moder
);
1602 err
= mlx5e_handle_pflag(netdev
, pflags
,
1603 MLX5E_PFLAG_RX_CQE_COMPRESS
,
1604 set_pflag_rx_cqe_compress
);
1607 mutex_unlock(&priv
->state_lock
);
1611 static u32
mlx5e_get_priv_flags(struct net_device
*netdev
)
1613 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1615 return priv
->channels
.params
.pflags
;
1618 static int mlx5e_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
1621 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1624 case ETHTOOL_SRXCLSRLINS
:
1625 err
= mlx5e_ethtool_flow_replace(priv
, &cmd
->fs
);
1627 case ETHTOOL_SRXCLSRLDEL
:
1628 err
= mlx5e_ethtool_flow_remove(priv
, cmd
->fs
.location
);
1638 const struct ethtool_ops mlx5e_ethtool_ops
= {
1639 .get_drvinfo
= mlx5e_get_drvinfo
,
1640 .get_link
= ethtool_op_get_link
,
1641 .get_strings
= mlx5e_get_strings
,
1642 .get_sset_count
= mlx5e_get_sset_count
,
1643 .get_ethtool_stats
= mlx5e_get_ethtool_stats
,
1644 .get_ringparam
= mlx5e_get_ringparam
,
1645 .set_ringparam
= mlx5e_set_ringparam
,
1646 .get_channels
= mlx5e_get_channels
,
1647 .set_channels
= mlx5e_set_channels
,
1648 .get_coalesce
= mlx5e_get_coalesce
,
1649 .set_coalesce
= mlx5e_set_coalesce
,
1650 .get_link_ksettings
= mlx5e_get_link_ksettings
,
1651 .set_link_ksettings
= mlx5e_set_link_ksettings
,
1652 .get_rxfh_key_size
= mlx5e_get_rxfh_key_size
,
1653 .get_rxfh_indir_size
= mlx5e_get_rxfh_indir_size
,
1654 .get_rxfh
= mlx5e_get_rxfh
,
1655 .set_rxfh
= mlx5e_set_rxfh
,
1656 .get_rxnfc
= mlx5e_get_rxnfc
,
1657 .set_rxnfc
= mlx5e_set_rxnfc
,
1658 .get_tunable
= mlx5e_get_tunable
,
1659 .set_tunable
= mlx5e_set_tunable
,
1660 .get_pauseparam
= mlx5e_get_pauseparam
,
1661 .set_pauseparam
= mlx5e_set_pauseparam
,
1662 .get_ts_info
= mlx5e_get_ts_info
,
1663 .set_phys_id
= mlx5e_set_phys_id
,
1664 .get_wol
= mlx5e_get_wol
,
1665 .set_wol
= mlx5e_set_wol
,
1666 .get_module_info
= mlx5e_get_module_info
,
1667 .get_module_eeprom
= mlx5e_get_module_eeprom
,
1668 .get_priv_flags
= mlx5e_get_priv_flags
,
1669 .set_priv_flags
= mlx5e_set_priv_flags
,
1670 .self_test
= mlx5e_self_test
,