]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
net/mlx5e: Declare stats groups via macro
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_stats.c
CommitLineData
c0752f2b
KH
1/*
2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
69c1280b 33#include "lib/mlx5.h"
c0752f2b 34#include "en.h"
e185d43f 35#include "en_accel/ipsec.h"
43585a41 36#include "en_accel/tls.h"
c0752f2b 37
3460c184
SM
38static unsigned int stats_grps_num(struct mlx5e_priv *priv)
39{
40 return !priv->profile->stats_grps_num ? 0 :
41 priv->profile->stats_grps_num(priv);
42}
43
44unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
45{
46 const struct mlx5e_stats_grp *stats_grps = priv->profile->stats_grps;
47 const unsigned int num_stats_grps = stats_grps_num(priv);
48 unsigned int total = 0;
49 int i;
50
51 for (i = 0; i < num_stats_grps; i++)
52 total += stats_grps[i].get_num_stats(priv);
53
54 return total;
55}
56
57void mlx5e_stats_update(struct mlx5e_priv *priv)
58{
59 const struct mlx5e_stats_grp *stats_grps = priv->profile->stats_grps;
60 const unsigned int num_stats_grps = stats_grps_num(priv);
61 int i;
62
63 for (i = num_stats_grps - 1; i >= 0; i--)
64 if (stats_grps[i].update_stats)
65 stats_grps[i].update_stats(priv);
66}
67
68void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
69{
70 const struct mlx5e_stats_grp *stats_grps = priv->profile->stats_grps;
71 const unsigned int num_stats_grps = stats_grps_num(priv);
72 int i;
73
74 for (i = 0; i < num_stats_grps; i++)
75 idx = stats_grps[i].fill_stats(priv, data, idx);
76}
77
78void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
79{
80 const struct mlx5e_stats_grp *stats_grps = priv->profile->stats_grps;
81 const unsigned int num_stats_grps = stats_grps_num(priv);
82 int i, idx = 0;
83
84 for (i = 0; i < num_stats_grps; i++)
85 idx = stats_grps[i].fill_strings(priv, data, idx);
86}
87
88/* Concrete NIC Stats */
89
c0752f2b
KH
/* Software counters accumulated per-channel in the sw update handler.
 * The order of this table is the order counters appear in ethtool -S;
 * do not reorder existing entries.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

#ifdef CONFIG_MLX5_EN_TLS
	/* TX TLS offload counters, compiled in only with TLS support. */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	/* AF_XDP zero-copy counters. */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
200
96b12796 201static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
c0752f2b
KH
202{
203 return NUM_SW_COUNTERS;
204}
205
96b12796 206static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
c0752f2b
KH
207{
208 int i;
209
210 for (i = 0; i < NUM_SW_COUNTERS; i++)
211 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
212 return idx;
213}
214
96b12796 215static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
c0752f2b
KH
216{
217 int i;
218
219 for (i = 0; i < NUM_SW_COUNTERS; i++)
220 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
221 return idx;
222}
223
/* Re-derive the aggregate SW stats by summing the per-channel counters.
 * The whole struct is zeroed first, then accumulated over all channels
 * (priv->max_nch) and, per channel, over all opened TCs for the SQ side.
 * Counters are read without locking; values may be slightly torn but are
 * monotonically maintained by the datapath.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		/* xdpsq = XDP_REDIRECT target SQ, rq_xdpsq = XDP_TX SQ */
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		/* RQ (regular receive) counters */
		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop     += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		/* XDP_TX SQ attached to this RQ */
		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
		s->rx_xdp_tx_nops  += xdpsq_stats->nops;
		s->rx_xdp_tx_full  += xdpsq_stats->full;
		s->rx_xdp_tx_err   += xdpsq_stats->err;
		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr  += rq_stats->congst_umr;
		s->rx_arfs_err    += rq_stats->arfs_err;
		s->rx_recover     += rq_stats->recover;
		/* Channel (NAPI/EQ) counters */
		s->ch_events      += ch_stats->events;
		s->ch_poll        += ch_stats->poll;
		s->ch_arm         += ch_stats->arm;
		s->ch_aff_change  += ch_stats->aff_change;
		s->ch_force_irq   += ch_stats->force_irq;
		s->ch_eq_rearm    += ch_stats->eq_rearm;
		/* xdp redirect */
		s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
		s->tx_xdp_mpwqe   += xdpsq_red_stats->mpwqe;
		s->tx_xdp_inlnw   += xdpsq_red_stats->inlnw;
		s->tx_xdp_nops	  += xdpsq_red_stats->nops;
		s->tx_xdp_full    += xdpsq_red_stats->full;
		s->tx_xdp_err     += xdpsq_red_stats->err;
		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;
		/* AF_XDP zero-copy */
		s->rx_xsk_packets                += xskrq_stats->packets;
		s->rx_xsk_bytes                  += xskrq_stats->bytes;
		s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
		s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
		s->rx_xsk_csum_none              += xskrq_stats->csum_none;
		s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
		s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
		s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
		s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
		s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
		s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
		s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
		s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
		s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
		s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
		s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
		s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
		s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
		s->tx_xsk_xmit                   += xsksq_stats->xmit;
		s->tx_xsk_mpwqe                  += xsksq_stats->mpwqe;
		s->tx_xsk_inlnw                  += xsksq_stats->inlnw;
		s->tx_xsk_full                   += xsksq_stats->full;
		s->tx_xsk_err                    += xsksq_stats->err;
		s->tx_xsk_cqes                   += xsksq_stats->cqes;

		/* Per-TC SQ counters for this channel */
		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop               += sq_stats->nop;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_cqe_err		+= sq_stats->cqe_err;
			s->tx_recover		+= sq_stats->recover;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
			s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
			s->tx_tls_ctx               += sq_stats->tls_ctx;
			s->tx_tls_ooo               += sq_stats->tls_ooo;
			s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
			s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
			s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
			s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
			s->tx_cqes		+= sq_stats->cqes;

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
}
358
fd8dcdb8
KH
/* Device Q counters: rx_out_of_buffer is exposed only when a regular Q
 * counter was allocated (priv->q_counter); rx_if_down_packets only when
 * the drop RQ got its own counter (priv->drop_rq_q_counter).
 */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
fd8dcdb8 369
96b12796 370static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
fd8dcdb8 371{
7cbaf9a3
MS
372 int num_stats = 0;
373
374 if (priv->q_counter)
375 num_stats += NUM_Q_COUNTERS;
376
377 if (priv->drop_rq_q_counter)
378 num_stats += NUM_DROP_RQ_COUNTERS;
379
380 return num_stats;
fd8dcdb8
KH
381}
382
96b12796 383static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
fd8dcdb8
KH
384{
385 int i;
386
387 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
7cbaf9a3
MS
388 strcpy(data + (idx++) * ETH_GSTRING_LEN,
389 q_stats_desc[i].format);
390
391 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
392 strcpy(data + (idx++) * ETH_GSTRING_LEN,
393 drop_rq_stats_desc[i].format);
394
fd8dcdb8
KH
395 return idx;
396}
397
96b12796 398static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
fd8dcdb8
KH
399{
400 int i;
401
402 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
7cbaf9a3
MS
403 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
404 q_stats_desc, i);
405 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
406 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
407 drop_rq_stats_desc, i);
fd8dcdb8
KH
408 return idx;
409}
410
/* Query the allocated Q counter(s) from firmware.  Both queries reuse the
 * same output buffer; failures leave the previously cached value intact.
 * The drop RQ counter also reports through the out_of_buffer field, which
 * is exposed as rx_if_down_packets.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

	if (priv->q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
				       sizeof(out)))
		qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
						  out, out_of_buffer);
	if (priv->drop_rq_q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
				       out, sizeof(out)))
		qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
						    out_of_buffer);
}
427
/* vnic_env counters, each gated on its own device capability bit. */
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

/* Count only counters the device actually reports. */
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
5c298143 445
96b12796 446static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
5c298143 447{
0cfafd4b
MS
448 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
449 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
5c298143
MS
450}
451
96b12796 452static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
5c298143
MS
453{
454 int i;
455
0cfafd4b
MS
456 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
457 strcpy(data + (idx++) * ETH_GSTRING_LEN,
458 vnic_env_stats_steer_desc[i].format);
5c298143 459
0cfafd4b 460 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
5c298143 461 strcpy(data + (idx++) * ETH_GSTRING_LEN,
0cfafd4b 462 vnic_env_stats_dev_oob_desc[i].format);
5c298143
MS
463 return idx;
464}
465
96b12796 466static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
5c298143
MS
467{
468 int i;
469
0cfafd4b 470 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
5c298143 471 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
0cfafd4b
MS
472 vnic_env_stats_steer_desc, i);
473
474 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
475 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
476 vnic_env_stats_dev_oob_desc, i);
5c298143
MS
477 return idx;
478}
479
96b12796 480static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
5c298143
MS
481{
482 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
483 int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
484 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
485 struct mlx5_core_dev *mdev = priv->mdev;
486
487 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
488 return;
489
490 MLX5_SET(query_vnic_env_in, in, opcode,
491 MLX5_CMD_OP_QUERY_VNIC_ENV);
492 MLX5_SET(query_vnic_env_in, in, op_mod, 0);
493 MLX5_SET(query_vnic_env_in, in, other_vport, 0);
494 mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
495}
496
40cab9f1
KH
/* Per-vport counters read from the QUERY_VPORT_COUNTER command output.
 * All offsets point at 64-bit big-endian fields.
 */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
542
96b12796 543static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
40cab9f1
KH
544{
545 return NUM_VPORT_COUNTERS;
546}
547
96b12796 548static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
40cab9f1
KH
549{
550 int i;
551
552 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
553 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
554 return idx;
555}
556
96b12796 557static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
40cab9f1
KH
558{
559 int i;
560
561 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
562 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
563 vport_stats_desc, i);
564 return idx;
565}
566
/* Issue QUERY_VPORT_COUNTER for the local vport (other_vport = 0) and
 * cache the raw output; the fill handler decodes it later.  Command
 * failures are ignored: the previous snapshot simply stays in place.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
579
6e6ef814
KH
/* IEEE 802.3 counter group of the PPCNT register.  The _high suffix picks
 * the upper dword of each 64-bit counter; MLX5E_READ_CTR64_BE reads the
 * full 64 bits from that offset.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
605
96b12796 606static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
6e6ef814
KH
607{
608 return NUM_PPORT_802_3_COUNTERS;
609}
610
96b12796 611static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
6e6ef814
KH
612{
613 int i;
614
615 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
616 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
617 return idx;
618}
619
96b12796 620static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
6e6ef814
KH
621{
622 int i;
623
624 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
625 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
626 pport_802_3_stats_desc, i);
627 return idx;
628}
629
/* Devices with pcam_reg advertise PPCNT support explicitly; older devices
 * without pcam_reg are assumed to support it.
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

/* Read the IEEE 802.3 group of the PPCNT register for local port 1 into
 * pstats->IEEE_802_3_counters.  NOTE(review): intentionally non-static in
 * the original — presumably referenced from another translation unit;
 * keep the external linkage unchanged.
 */
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
649
fc8e64a3
KH
/* RFC 2863 (interface MIB) counter group of the PPCNT register. */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
660
96b12796 661static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
fc8e64a3
KH
662{
663 return NUM_PPORT_2863_COUNTERS;
664}
665
96b12796 666static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
fc8e64a3
KH
667{
668 int i;
669
670 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
671 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
672 return idx;
673}
674
96b12796 675static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
fc8e64a3
KH
676{
677 int i;
678
679 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
680 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
681 pport_2863_stats_desc, i);
682 return idx;
683}
684
/* Read the RFC 2863 group of the PPCNT register for local port 1 into
 * pstats->RFC_2863_counters.  NOTE(review): unlike the 802.3 and 2819
 * handlers, no MLX5_BASIC_PPCNT_SUPPORTED() guard here — presumably this
 * group is always available; confirm against the PRM.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
698
e0e0def9
KH
/* RFC 2819 (RMON etherStats) counter group of the PPCNT register,
 * including the RX packet-size histogram.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
719
96b12796 720static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
e0e0def9
KH
721{
722 return NUM_PPORT_2819_COUNTERS;
723}
724
96b12796 725static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
e0e0def9
KH
726{
727 int i;
728
729 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
730 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
731 return idx;
732}
733
96b12796 734static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
e0e0def9
KH
735{
736 int i;
737
738 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
739 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
740 pport_2819_stats_desc, i);
741 return idx;
742}
743
/* Refresh the cached RFC 2819 counter group by reading the PPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	/* Skip devices that lack the basic PPCNT counter groups. */
	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* Byte offset of a physical-layer statistical counter in the PPCNT layout. */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bits counters; only valid when the PCAM
 * per_lane_error_counters capability is set.
 */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

/* PHY group size depends on device capabilities (PCAM feature bits). */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}
798
96b12796 799static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
2e4df0b2 800{
4cb4e98e 801 struct mlx5_core_dev *mdev = priv->mdev;
2e4df0b2
KH
802 int i;
803
6ab75516
SM
804 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
805
4cb4e98e 806 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
6ab75516
SM
807 return idx;
808
809 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
810 strcpy(data + (idx++) * ETH_GSTRING_LEN,
811 pport_phy_statistical_stats_desc[i].format);
4cb4e98e
SA
812
813 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
814 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
815 strcpy(data + (idx++) * ETH_GSTRING_LEN,
816 pport_phy_statistical_err_lanes_stats_desc[i].format);
817
2e4df0b2
KH
818 return idx;
819}
820
/* Fill PHY group values in the same order the strings were emitted. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

/* Refresh the PHY counter caches: always read the basic physical-layer
 * group, and additionally the statistical group when supported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* Byte offset of an extended ethernet counter in the PPCNT layout. */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

/* Extended ethernet counters exist only with rx_buffer_fullness_counters. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}
884
96b12796 885static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
3488bd4c
KH
886{
887 int i;
888
889 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
890 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
891 strcpy(data + (idx++) * ETH_GSTRING_LEN,
892 pport_eth_ext_stats_desc[i].format);
893 return idx;
894}
895
96b12796 896static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
3488bd4c
KH
897{
898 int i;
899
900 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
901 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
902 data[idx++] =
903 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
904 pport_eth_ext_stats_desc, i);
905 return idx;
906}
907
/* Refresh the extended-ethernet counter cache from the PPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

/* Byte offset of a 32-bit PCIe performance counter in the MPCNT layout. */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of a 64-bit PCIe performance counter (high dword). */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* Outbound-stall counters, gated by the pcie_outbound_stalled capability. */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

/* PCIe group size depends on three independent MCAM capability bits. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

/* Fill PCIe counter names; each sub-set is emitted only when its
 * capability bit is set, matching the NUM_STATS accounting above.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

/* Fill PCIe counter values; note the 32-bit vs 64-bit read helpers must
 * match each descriptor table's layout.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

/* Refresh the PCIe performance counter cache from the MPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

/* Byte offset of a per-traffic-class priority counter in the PPCNT layout. */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

/* Format strings contain %d, filled with the priority number at fill time. */
static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Byte offset of a per-TC congestion-priority counter in the PPCNT layout. */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1048static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1049{
1050 struct mlx5_core_dev *mdev = priv->mdev;
1051
1052 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1053 return 0;
1054
1055 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1056}
1057
/* Fill per-priority buffer/congestion counter names; %d in each format
 * string is replaced with the priority number.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

/* Fill per-priority buffer/congestion counter values, in the same
 * prio-major order as the strings above.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

/* Read the per-traffic-class counter group once per priority. */
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	/* NOTE(review): pnat=2 selects the port-number addressing mode used
	 * for these groups per the PRM — confirm against register docs.
	 */
	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1121
1122static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1123{
1124 struct mlx5_core_dev *mdev = priv->mdev;
1125
1126 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1127 return 0;
1128
1129 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1130}
1131
/* Read the per-traffic-class congestion group once per priority. */
static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

/* Group size is the sum of the two per-TC sub-groups. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

/* Update both per-TC sub-groups. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

/* Byte offset of a per-priority counter in the PPCNT register layout. */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Format strings contain %d, filled with the priority number at fill time. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

/* Fixed size: every priority always exposes the traffic counters. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
1182
1183static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1184 u8 *data,
1185 int idx)
1186{
1187 int i, prio;
1188
1189 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1190 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1191 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1192 pport_per_prio_traffic_stats_desc[i].format, prio);
1193 }
1194
1195 return idx;
1196}
1197
1198static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1199 u64 *data,
1200 int idx)
1201{
1202 int i, prio;
1203
1204 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1205 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1206 data[idx++] =
1207 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1208 pport_per_prio_traffic_stats_desc, i);
1209 }
1210
1211 return idx;
1212}
1213
4377bea2
KH
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

/* Pause-storm watermark counters; read from the prio-0 counter set. */
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters exist only when both pfcc_mask and stall_detect are set;
 * the capability bits act as 0/1 multipliers on the array size.
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
4377bea2
KH
1232
1233static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1234{
1235 struct mlx5_core_dev *mdev = priv->mdev;
1236 u8 pfc_en_tx;
1237 u8 pfc_en_rx;
1238 int err;
1239
1240 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1241 return 0;
1242
1243 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1244
1245 return err ? 0 : pfc_en_tx | pfc_en_rx;
1246}
1247
1248static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1249{
1250 struct mlx5_core_dev *mdev = priv->mdev;
1251 u32 rx_pause;
1252 u32 tx_pause;
1253 int err;
1254
1255 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1256 return false;
1257
1258 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1259
1260 return err ? false : rx_pause | tx_pause;
1261}
1262
/* PFC group size: one counter set per PFC-enabled priority, one "global"
 * set when global pause is on, plus the capability-gated stall counters.
 */
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

/* Emit PFC counter names: the %s placeholder becomes "prio{N}" for each
 * PFC-enabled priority and "global" when global pause is enabled.
 */
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	/* Stall counter names have no placeholder; loop bound is 0 when the
	 * device lacks the capability.
	 */
	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

/* Emit PFC counter values in exactly the order the strings were emitted.
 * Global pause and stall counters both read from the prio-0 counter set.
 */
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}
1333
/* per_prio group: traffic sub-group plus PFC sub-group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

/* Read the per-priority counter group once per priority. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	/* Skip devices that lack the basic PPCNT counter groups. */
	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

/* Port module event (PME) counters; offsets index u64 arrays by event type. */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
1393
96b12796 1394static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
0e6f01a4
KH
1395{
1396 int i;
1397
1398 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1399 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1400
1401 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1402 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1403
1404 return idx;
1405}
1406
/* Fill PME counter values; the snapshot is fetched here, so the group
 * needs no separate update_stats work.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

/* No cached state to refresh — values are read at fill time. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

/* IPsec group: all operations delegate to the en_accel/ipsec helpers. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
{
	return mlx5e_ipsec_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
{
	mlx5e_ipsec_update_stats(priv);
}

/* TLS group: all operations delegate to the en_accel/tls helpers. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

/* No cached state to refresh — values are read at fill time. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1464
1fe85006
KH
/* Per-RQ (receive queue) software counters exposed per channel. */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};
1496
/* Per-SQ (send queue) software counters exposed per channel; the TLS
 * subset is compiled in only with CONFIG_MLX5_EN_TLS.
 */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
1529
890388ad
TT
1530static const struct counter_desc rq_xdpsq_stats_desc[] = {
1531 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
73cab880 1532 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
c2273219 1533 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
6c085a8a 1534 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
890388ad
TT
1535 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1536 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1537 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1538};
1539
/* Per-channel counters for the standalone XDPSQ (presumably the
 * XDP_REDIRECT target queue — confirm against en/xdp.c). Same field set
 * as rq_xdpsq_stats_desc but with its own name prefix via the macro.
 * Entry order defines the ethtool string order; append-only.
 */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1549
/* Per-channel counters for the AF_XDP (XSK) RQ. Reuses struct
 * mlx5e_rq_stats but exposes only the subset relevant to XSK.
 * Entry order defines the ethtool string order; append-only.
 */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1571
/* Per-channel counters for the AF_XDP (XSK) TX queue. Reuses struct
 * mlx5e_xdpsq_stats; note "nops" is not exposed here, unlike the
 * non-XSK XDPSQ tables. Entry order defines the ethtool string order.
 */
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1580
/* Per-channel event/IRQ counters (struct mlx5e_ch_stats).
 * Entry order defines the ethtool string order; append-only.
 */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
1589
/* Entry counts of the per-channel descriptor tables above, used by the
 * channels stats group ops below to size and walk the ethtool output.
 */
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
96b12796 1598static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
1fe85006 1599{
694826e3 1600 int max_nch = priv->max_nch;
05909bab 1601
05909bab
EBE
1602 return (NUM_RQ_STATS * max_nch) +
1603 (NUM_CH_STATS * max_nch) +
890388ad 1604 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
58b99ee3 1605 (NUM_RQ_XDPSQ_STATS * max_nch) +
db05815b
MM
1606 (NUM_XDPSQ_STATS * max_nch) +
1607 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1608 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
1fe85006
KH
1609}
1610
96b12796 1611static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
1fe85006 1612{
db05815b 1613 bool is_xsk = priv->xsk.ever_used;
694826e3 1614 int max_nch = priv->max_nch;
1fe85006
KH
1615 int i, j, tc;
1616
05909bab 1617 for (i = 0; i < max_nch; i++)
57d689a8
EBE
1618 for (j = 0; j < NUM_CH_STATS; j++)
1619 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1620 ch_stats_desc[j].format, i);
1621
890388ad 1622 for (i = 0; i < max_nch; i++) {
1fe85006 1623 for (j = 0; j < NUM_RQ_STATS; j++)
890388ad
TT
1624 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1625 rq_stats_desc[j].format, i);
db05815b
MM
1626 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1627 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1628 xskrq_stats_desc[j].format, i);
890388ad
TT
1629 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1630 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1631 rq_xdpsq_stats_desc[j].format, i);
1632 }
1fe85006 1633
05909bab
EBE
1634 for (tc = 0; tc < priv->max_opened_tc; tc++)
1635 for (i = 0; i < max_nch; i++)
1fe85006
KH
1636 for (j = 0; j < NUM_SQ_STATS; j++)
1637 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1638 sq_stats_desc[j].format,
c55d8b10 1639 i + tc * max_nch);
1fe85006 1640
db05815b
MM
1641 for (i = 0; i < max_nch; i++) {
1642 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1643 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1644 xsksq_stats_desc[j].format, i);
58b99ee3
TT
1645 for (j = 0; j < NUM_XDPSQ_STATS; j++)
1646 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1647 xdpsq_stats_desc[j].format, i);
db05815b 1648 }
58b99ee3 1649
1fe85006
KH
1650 return idx;
1651}
1652
96b12796 1653static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
1fe85006 1654{
db05815b 1655 bool is_xsk = priv->xsk.ever_used;
694826e3 1656 int max_nch = priv->max_nch;
1fe85006
KH
1657 int i, j, tc;
1658
05909bab 1659 for (i = 0; i < max_nch; i++)
57d689a8
EBE
1660 for (j = 0; j < NUM_CH_STATS; j++)
1661 data[idx++] =
05909bab 1662 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
57d689a8
EBE
1663 ch_stats_desc, j);
1664
890388ad 1665 for (i = 0; i < max_nch; i++) {
1fe85006
KH
1666 for (j = 0; j < NUM_RQ_STATS; j++)
1667 data[idx++] =
05909bab 1668 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1fe85006 1669 rq_stats_desc, j);
db05815b
MM
1670 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1671 data[idx++] =
1672 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1673 xskrq_stats_desc, j);
890388ad
TT
1674 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1675 data[idx++] =
1676 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1677 rq_xdpsq_stats_desc, j);
1678 }
1fe85006 1679
05909bab
EBE
1680 for (tc = 0; tc < priv->max_opened_tc; tc++)
1681 for (i = 0; i < max_nch; i++)
1fe85006
KH
1682 for (j = 0; j < NUM_SQ_STATS; j++)
1683 data[idx++] =
05909bab 1684 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
1fe85006
KH
1685 sq_stats_desc, j);
1686
db05815b
MM
1687 for (i = 0; i < max_nch; i++) {
1688 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1689 data[idx++] =
1690 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
1691 xsksq_stats_desc, j);
58b99ee3
TT
1692 for (j = 0; j < NUM_XDPSQ_STATS; j++)
1693 data[idx++] =
1694 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
1695 xdpsq_stats_desc, j);
db05815b 1696 }
58b99ee3 1697
1fe85006
KH
1698 return idx;
1699}
1700
/* Channel counters are read live from per-channel SW stats in
 * FILL_STATS(channels); there is no cached state to refresh here.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels)
{
}
1702
/* The stats groups order is opposite to the update_stats() order calls */
/* Groups flagged MLX5E_NDO_UPDATE_STATS are presumably also refreshed
 * from the ndo_get_stats64 path — confirm against en_main.c.
 */
const struct mlx5e_stats_grp mlx5e_nic_stats_grps[] = {
	MLX5E_DEFINE_STATS_GRP(sw, 0),
	MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS),
	MLX5E_DEFINE_STATS_GRP(vnic_env, 0),
	MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS),
	MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS),
	MLX5E_DEFINE_STATS_GRP(2863, 0),
	MLX5E_DEFINE_STATS_GRP(2819, 0),
	MLX5E_DEFINE_STATS_GRP(phy, 0),
	MLX5E_DEFINE_STATS_GRP(eth_ext, 0),
	MLX5E_DEFINE_STATS_GRP(pcie, 0),
	MLX5E_DEFINE_STATS_GRP(per_prio, 0),
	MLX5E_DEFINE_STATS_GRP(pme, 0),
	MLX5E_DEFINE_STATS_GRP(ipsec, 0),
	MLX5E_DEFINE_STATS_GRP(tls, 0),
	MLX5E_DEFINE_STATS_GRP(channels, 0),
	MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0),
};
1722
3460c184
SM
1723unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
1724{
1725 return ARRAY_SIZE(mlx5e_nic_stats_grps);
1726}