/*
 * Source: drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
 * (mirror_ubuntu-bionic-kernel, via git.proxmox.com gitweb)
 */
1 /*
2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include "en.h"
34 #include "en_accel/ipsec.h"
35
/* Software counters: driver-maintained fields of struct mlx5e_sw_stats,
 * read as CPU-endian u64 from priv->stats.sw. Entry order defines the
 * ethtool -S ordering for this group, so only append new entries.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
78
/* Number of ethtool stats contributed by the software group (constant). */
static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_SW_COUNTERS;
}
83
84 static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
85 {
86 int i;
87
88 for (i = 0; i < NUM_SW_COUNTERS; i++)
89 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
90 return idx;
91 }
92
93 static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
94 {
95 int i;
96
97 for (i = 0; i < NUM_SW_COUNTERS; i++)
98 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
99 return idx;
100 }
101
/* Device queue-counter stats, read as CPU-endian u32 from
 * priv->stats.qcnt. Only present when a q_counter was allocated
 * (priv->q_counter != 0).
 */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)

/* Group contributes nothing when no q_counter was allocated. */
static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
	return priv->q_counter ? NUM_Q_COUNTERS : 0;
}

/* Emit q-counter names; the loop condition makes this a no-op when
 * priv->q_counter == 0, matching get_num_stats above.
 */
static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
	return idx;
}

/* Emit q-counter values; note these are 32-bit CPU-endian counters. */
static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
	return idx;
}
130
/* Vport counters: byte offsets into the firmware query_vport_counter_out
 * mailbox (priv->stats.vport.query_vport_out), read as big-endian u64.
 */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)

/* All vport counters are always exposed. */
static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_VPORT_COUNTERS;
}

/* Emit vport counter names in table order. */
static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

/* Emit vport counter values: big-endian u64s from the cached
 * query_vport_counter_out buffer.
 */
static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}
202
/* IEEE 802.3 physical-port counters: offsets of the *_high half of each
 * 64-bit field in the PPCNT register's 802.3 counter group; values are
 * read big-endian from priv->stats.pport.IEEE_802_3_counters.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

/* All 802.3 counters are always exposed. */
static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_802_3_COUNTERS;
}

/* Emit 802.3 counter names in table order. */
static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

/* Emit 802.3 counter values (big-endian u64s from the cached PPCNT dump). */
static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}
254
/* RFC 2863 (interfaces MIB) physical-port counters from the PPCNT
 * register; read big-endian from priv->stats.pport.RFC_2863_counters.
 */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

/* All RFC 2863 counters are always exposed. */
static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2863_COUNTERS;
}

/* Emit RFC 2863 counter names in table order. */
static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

/* Emit RFC 2863 counter values (big-endian u64s). */
static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}
291
/* RFC 2819 (RMON MIB) physical-port counters from the PPCNT register;
 * read big-endian from priv->stats.pport.RFC_2819_counters.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

/* All RFC 2819 counters are always exposed. */
static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2819_COUNTERS;
}

/* Emit RFC 2819 counter names in table order. */
static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

/* Emit RFC 2819 counter values (big-endian u64s). */
static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}
338
/* Physical-layer statistical counters; only exposed when the device
 * advertises the PCAM ppcnt_statistical_group capability. The same
 * capability check gates all three callbacks so counts stay consistent.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)

/* Zero stats when the statistical PPCNT group is not supported. */
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
	return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
		NUM_PPORT_PHY_COUNTERS : 0;
}

/* Emit phy statistical counter names, capability permitting. */
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
		for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_stats_desc[i].format);
	return idx;
}

/* Emit phy statistical counter values (big-endian u64s). */
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
		for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_stats_desc, i);
	return idx;
}
378
/* Extended ethernet PPCNT counters; gated on the PCAM
 * rx_buffer_fullness_counters capability in all three callbacks.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

/* Zero stats when rx buffer fullness counters are not supported. */
static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

/* Emit extended counter names, capability permitting. */
static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
					  int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

/* Emit extended counter values (big-endian u64s). */
static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
					int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}
420
/* PCIe performance counters from the MPCNT register. Three sub-tables,
 * each gated on its own MCAM capability; the capability checks in
 * get_num_stats, fill_strings and fill_stats must stay in the same
 * order so strings and values line up.
 */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
/* 32-bit counters (read with MLX5E_READ_CTR32_BE). */
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
/* 64-bit counters (offset of the _high half; read with MLX5E_READ_CTR64_BE). */
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* 32-bit outbound-stall counters. */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

/* Sum the sizes of whichever PCIe sub-tables the device supports. */
static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

/* Emit names for the supported PCIe sub-tables, in fixed order. */
static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

/* Emit values for the supported PCIe sub-tables; note the mix of
 * 32-bit and 64-bit big-endian reads matching each table's layout.
 */
static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}
507
/* Per-priority traffic counters; the "%d" in each format string is
 * filled with the priority number (0..NUM_PPORT_PRIO-1) at string
 * emission time. One PPCNT per-prio counter set is cached per priority
 * in priv->stats.pport.per_prio_counters[].
 */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

/* One full set of traffic counters per priority. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

/* Emit "rx_prioN_*"/"tx_prioN_*" names, outer loop over priorities. */
static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

/* Emit per-priority traffic values in the same (prio, counter) order
 * as the strings above.
 */
static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}
555
/* PFC/pause counters; the "%s" in each format string becomes either
 * "global" (plain pause) or "prio<i>" (per-priority PFC).
 */
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
566
567 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
568 {
569 struct mlx5_core_dev *mdev = priv->mdev;
570 u8 pfc_en_tx;
571 u8 pfc_en_rx;
572 int err;
573
574 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
575 return 0;
576
577 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
578
579 return err ? 0 : pfc_en_tx | pfc_en_rx;
580 }
581
582 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
583 {
584 struct mlx5_core_dev *mdev = priv->mdev;
585 u32 rx_pause;
586 u32 tx_pause;
587 int err;
588
589 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
590 return false;
591
592 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
593
594 return err ? false : rx_pause | tx_pause;
595 }
596
/* One PFC counter set per PFC-enabled priority, plus one "global" set
 * when plain pause is on. Relies on bool + hweight8() arithmetic.
 */
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS;
}

/* Emit per-prio PFC names first (for each enabled priority), then the
 * "global" pause names. Ordering must match fill_stats below.
 */
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	return idx;
}

/* Emit PFC values in the same order as the strings; global pause values
 * come from per_prio_counters[0] (priority 0 set).
 */
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	return idx;
}
658
/* Port module event (PME) counters: raw byte offsets into the core
 * device's pme_stats status/error counter arrays (CPU-endian u64).
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", 16 },       /* bus stuck (I2C or data shorted) */
	{ "module_high_temp", 48 },       /* high temperature */
	{ "module_bad_shorted", 56 },     /* bad or shorted cable/module */
};

#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)

/* PME status + error counters are always exposed. */
static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

/* Emit PME names: status counters first, then error counters. */
static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

/* Emit PME values from the core device (mdev->priv.pme_stats), in the
 * same status-then-error order as the strings.
 */
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
				    int idx)
{
	struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

/* IPsec stats are fully delegated to the en_accel/ipsec helpers. */
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_ipsec_get_count(priv);
}

/* Delegate string emission; the helper returns how many it wrote. */
static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

/* Delegate value emission; the helper returns how many it wrote. */
static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
725
/* Per-RQ (receive queue) software counters; format strings carry a
 * channel-number placeholder filled at string emission time.
 */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
};

/* Per-SQ (send queue) software counters, one set per txq. */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};

#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)

/* One RQ stat set per channel plus one SQ stat set per (channel, tc). */
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
	return (NUM_RQ_STATS * priv->channels.num) +
		(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
}
778
/* Emit per-channel stat names: all RQ names first (indexed by channel),
 * then SQ names ordered tc-major, using the channel's txq number from
 * channel_tc2txq. Skipped entirely while the netdev is not opened.
 */
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int i, j, tc;

	/* Channels (and their stats) only exist while OPENED. */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return idx;

	for (i = 0; i < priv->channels.num; i++)
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);

	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
		for (i = 0; i < priv->channels.num; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					priv->channel_tc2txq[i][tc]);

	return idx;
}

/* Emit per-channel stat values in exactly the same (rq-first, then
 * tc-major sq) order as fill_strings above.
 */
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	struct mlx5e_channels *channels = &priv->channels;
	int i, j, tc;

	/* Channels (and their stats) only exist while OPENED. */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return idx;

	for (i = 0; i < channels->num; i++)
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
						     rq_stats_desc, j);

	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
		for (i = 0; i < channels->num; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
							     sq_stats_desc, j);

	return idx;
}
825
/* Master table of all stats groups; ethtool iterates it in order, so
 * the sequence here is the user-visible ordering of ethtool -S output.
 */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	}
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);