]> git.proxmox.com Git - ceph.git/blob - ceph/src/dpdk/drivers/net/bnx2x/bnx2x_stats.c
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / dpdk / drivers / net / bnx2x / bnx2x_stats.c
1 /*-
2 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
3 *
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
7 *
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9 * Copyright (c) 2015 QLogic Corporation.
10 * All rights reserved.
11 * www.qlogic.com
12 *
13 * See LICENSE.bnx2x_pmd for copyright and licensing details.
14 */
15
16 #include "bnx2x.h"
17 #include "bnx2x_stats.h"
18
/*
 * Width of 'long' in bits for this build.
 * NOTE(review): assumes every non-i386 target is 64-bit (LP64) — that is
 * wrong for other 32-bit targets (e.g. 32-bit ARM/PPC); confirm the set of
 * architectures this PMD is built for.
 */
#ifdef __i386__
#define BITS_PER_LONG 32
#else
#define BITS_PER_LONG 64
#endif
24
25 static inline uint16_t
26 bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
27 {
28 uint16_t res = 0;
29 uint32_t size;
30
31 /* 'newest' convention - shmem2 contains the size of the port stats */
32 if (SHMEM2_HAS(sc, sizeof_port_stats)) {
33 size = SHMEM2_RD(sc, sizeof_port_stats);
34 if (size) {
35 res = size;
36 }
37
38 /* prevent newer BC from causing buffer overflow */
39 if (res > sizeof(struct host_port_stats)) {
40 res = sizeof(struct host_port_stats);
41 }
42 }
43
44 /*
45 * Older convention - all BCs support the port stats fields up until
46 * the 'not_used' field
47 */
48 if (!res) {
49 res = (offsetof(struct host_port_stats, not_used) + 4);
50
51 /* if PFC stats are supported by the MFW, DMA them as well */
52 if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
53 res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
54 offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
55 }
56 }
57
58 res >>= 2;
59
60 return res;
61 }
62
63 /*
64 * Init service functions
65 */
66
67 /*
68 * Post the next statistics ramrod. Protect it with the lock in
69 * order to ensure the strict order between statistics ramrods
70 * (each ramrod has a sequence number passed in a
71 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
72 * sent in order).
73 */
74 static void
75 bnx2x_storm_stats_post(struct bnx2x_softc *sc)
76 {
77 int rc;
78
79 if (!sc->stats_pending) {
80 if (sc->stats_pending) {
81 return;
82 }
83
84 sc->fw_stats_req->hdr.drv_stats_counter =
85 htole16(sc->stats_counter++);
86
87 PMD_DEBUG_PERIODIC_LOG(DEBUG,
88 "sending statistics ramrod %d",
89 le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
90
91 /* adjust the ramrod to include VF queues statistics */
92
93 /* send FW stats ramrod */
94 rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
95 U64_HI(sc->fw_stats_req_mapping),
96 U64_LO(sc->fw_stats_req_mapping),
97 NONE_CONNECTION_TYPE);
98 if (rc == 0) {
99 sc->stats_pending = 1;
100 }
101 }
102 }
103
/*
 * Kick off the DMAE commands queued in the dmae[] scratch area.
 * When several commands were queued (executer_idx != 0) a "loader"
 * command is built that copies the queued command list into the DMAE
 * command memory and chains them; otherwise, if only the function
 * stats command exists, it is posted directly.
 */
static void
bnx2x_hw_stats_post(struct bnx2x_softc *sc)
{
	struct dmae_command *dmae = &sc->stats_dmae;
	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
	int loader_idx;
	uint32_t opcode;

	/* pre-mark completion so a skipped post still looks finished
	 * to bnx2x_stats_comp() */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(sc)) {
		return;
	}

	/* Update MCP's statistics if possible */
	if (sc->func_stx) {
		rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
			   sizeof(sc->func_stats));
	}

	/* loader */
	if (sc->executer_idx) {
		loader_idx = PMF_DMAE_C(sc);
		/* PCI -> GRC copy of the queued command list, GRC completion
		 * so the hardware chains into the first queued command */
		opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
					   TRUE, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
		/* destination: DMAE command memory slot after the loader's */
		dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
				      sizeof(struct dmae_command) *
				      (loader_idx + 1)) >> 2);
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(sc, dmae, loader_idx);
	} else if (sc->func_stx) {
		/* single pre-built function stats command */
		*stats_comp = 0;
		bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
	}
}
150
151 static int
152 bnx2x_stats_comp(struct bnx2x_softc *sc)
153 {
154 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
155 int cnt = 10;
156
157 while (*stats_comp != DMAE_COMP_VAL) {
158 if (!cnt) {
159 PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
160 break;
161 }
162
163 cnt--;
164 DELAY(1000);
165 }
166
167 return 1;
168 }
169
170 /*
171 * Statistics service functions
172 */
173
/*
 * Pull the device's current port statistics block (GRC) into the host
 * port_stats buffer using two chained DMAE commands, then post them and
 * busy-wait for completion. Run when this driver instance is (becoming)
 * the PMF so it continues from the hardware's accumulated totals.
 */
static void
bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
{
	struct dmae_command *dmae;
	uint32_t opcode;
	int loader_idx = PMF_DMAE_C(sc);
	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);

	if (sc->devinfo.bc_ver <= 0x06001400) {
		/*
		 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
		 * BRB registers while the BRB block is in reset. The DMA transfer
		 * below triggers this issue resulting in the DMAE to stop
		 * functioning. Skip this initial stats transfer for old bootcode
		 * versions <= 6.0.20.
		 */
		return;
	}
	/* sanity: only the PMF with a valid port stats address may do this */
	if (!sc->port.pmf || !sc->port.port_stx) {
		PMD_DRV_LOG(ERR, "BUG!");
		return;
	}

	sc->executer_idx = 0;

	/* GRC -> PCI read; completion mode is added per command below */
	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);

	/* first chunk: DMAE_LEN32_RD_MAX dwords, GRC completion chains
	 * the loader into the second command */
	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = (sc->port.port_stx >> 2);
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completing to the PCI stats_comp word */
	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);

	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(sc);
	bnx2x_stats_comp(sc);
}
231
/*
 * Build the full set of DMAE commands for one statistics cycle on the
 * PMF: write host port/function stats out to the MCP areas, then read
 * the MAC counters (EMAC, BigMAC or MSTAT depending on the active MAC)
 * and the NIG counters into host memory. All commands but the last
 * complete to GRC so the DMAE loader chains them; the final command
 * completes to the PCI stats_comp word.
 */
static void
bnx2x_port_stats_init(struct bnx2x_softc *sc)
{
	struct dmae_command *dmae;
	int port = SC_PORT(sc);
	uint32_t opcode;
	int loader_idx = PMF_DMAE_C(sc);
	uint32_t mac_addr;
	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);

	/* sanity */
	if (!sc->link_vars.link_up || !sc->port.pmf) {
		PMD_DRV_LOG(ERR, "BUG!");
		return;
	}

	sc->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
				   TRUE, DMAE_COMP_GRC);

	/* host port stats -> MCP port stats area */
	if (sc->port.port_stx) {
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
		dmae->dst_addr_lo = sc->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(sc);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* host function stats -> MCP function stats area */
	if (sc->func_stx) {
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
		dmae->dst_addr_lo = (sc->func_stx >> 2);
		dmae->dst_addr_hi = 0;
		dmae->len = (sizeof(struct host_func_stats) >> 2);
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
				   TRUE, DMAE_COMP_GRC);

	/* EMAC is special */
	if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
		dmae->src_addr_hi = 0;
		/* single register landing directly on its struct field */
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		uint32_t tx_src_addr_lo, rx_src_addr_lo;
		uint16_t rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (sc->link_vars.mac_type) {
		case ELINK_MAC_TYPE_BMAC:
			mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
					    NIG_REG_INGRESS_BMAC0_MEM;

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(sc)) {
				tx_src_addr_lo =
				    ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
				tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					   BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
				rx_src_addr_lo =
				    ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
				rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					   BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
			} else {
				tx_src_addr_lo =
				    ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
				tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					   BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
				rx_src_addr_lo =
				    ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
				rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					   BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
			}

			break;

		case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
		case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
			rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
			tx_len =
			    (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
			rx_len =
			    (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
			break;
		}

		/* TX stats */
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats - placed in mac_stats right after the TX block */
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
		    U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
		    U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(sc)) {
		/* per-port egress packet counters (two dwords each) */
		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo =
		    (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
			    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
					   offsetof(struct nig_stats,
						    egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
					   offsetof(struct nig_stats,
						    egress_mac_pkt0_lo));
		dmae->len = ((2 * sizeof(uint32_t)) >> 2);
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo =
		    (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
			    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
					   offsetof(struct nig_stats,
						    egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
					   offsetof(struct nig_stats,
						    egress_mac_pkt1_lo));
		dmae->len = ((2 * sizeof(uint32_t)) >> 2);
		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* final command: BRB discard counters, completes to PCI so the
	 * whole chain signals done via the stats_comp word */
	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
					 TRUE, DMAE_COMP_PCI);
	dmae->src_addr_lo =
	    (port ? NIG_REG_STAT1_BRB_DISCARD :
		    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
	/* all of nig_stats except the last four dwords */
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;

	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
459
/*
 * Build the single stats DMAE command used by non-PMF functions: copy
 * the host function statistics (host_func_stats) to the func_stx area
 * in device GRC space, completing to the PCI stats_comp word.
 */
static void
bnx2x_func_stats_init(struct bnx2x_softc *sc)
{
	struct dmae_command *dmae = &sc->stats_dmae;
	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);

	/* sanity */
	if (!sc->func_stx) {
		PMD_DRV_LOG(ERR, "BUG!");
		return;
	}

	sc->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC write with PCI completion */
	dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
					 TRUE, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
	dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
	dmae->dst_addr_lo = (sc->func_stx >> 2);	/* GRC address, dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = (sizeof(struct host_func_stats) >> 2);	/* dwords */
	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
488
489 static void
490 bnx2x_stats_start(struct bnx2x_softc *sc)
491 {
492 /*
493 * VFs travel through here as part of the statistics FSM, but no action
494 * is required
495 */
496 if (IS_VF(sc)) {
497 return;
498 }
499
500 if (sc->port.pmf) {
501 bnx2x_port_stats_init(sc);
502 }
503
504 else if (sc->func_stx) {
505 bnx2x_func_stats_init(sc);
506 }
507
508 bnx2x_hw_stats_post(sc);
509 bnx2x_storm_stats_post(sc);
510 }
511
/*
 * PMF takeover path: wait out any in-flight stats DMAE, snapshot the
 * device's accumulated port stats, then start a regular stats cycle.
 */
static void
bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
{
	bnx2x_stats_comp(sc);
	bnx2x_stats_pmf_update(sc);
	bnx2x_stats_start(sc);
}
519
/*
 * Restart the statistics cycle: wait for any outstanding stats DMAE to
 * finish, then kick off a fresh cycle.
 */
static void
bnx2x_stats_restart(struct bnx2x_softc *sc)
{
	/* VFs traverse the statistics FSM too, but have nothing to do here */
	if (IS_VF(sc))
		return;

	bnx2x_stats_comp(sc);
	bnx2x_stats_start(sc);
}
534
/*
 * Fold the BigMAC counters (DMAed earlier into mac_stats) into the
 * cumulative port stats (pstats->mac_stx) and the driver's eth_stats.
 * BMAC1 is used on E1x chips, BMAC2 otherwise; BMAC2 additionally
 * supplies PFC frame counters.
 *
 * NOTE: the UPDATE_STAT64()/ADD_64() macros reference the locals 'new',
 * 'diff', 'pstats' and 'estats' by name - do not rename them.
 */
static void
bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
{
	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
	struct bnx2x_eth_stats *estats = &sc->eth_stats;
	/* scratch used by the UPDATE_STAT64() macro */
	struct {
		uint32_t lo;
		uint32_t hi;
	} diff;

	if (CHIP_IS_E1x(sc)) {
		struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
			      tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
			      tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
			      tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
			      tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
			      tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
			      tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
	} else {
		struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
			      tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
			      tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
			      tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
			      tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
			      tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
			      tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
		ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
		       pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
		ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
		       pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
	}

	/* mirror pause/PFC totals into the driver's eth_stats */
	estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
637
/*
 * Fold the MSTAT block counters (used for UMAC/XMAC MACs) into the
 * cumulative port stats (pstats->mac_stx) and the driver's eth_stats,
 * including PFC frame counts and the over-1522-octet aggregate.
 *
 * NOTE: the ADD_STAT64()/ADD_64() macros reference the locals 'new',
 * 'pstats' and 'estats' by name - do not rename them.
 */
static void
bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
{
	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
	struct bnx2x_eth_stats *estats = &sc->eth_stats;
	struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
		   tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
		   tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

	ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	/* over-1522 = sum of the 2047/4095/9216/16383 size buckets */
	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	/* mirror pause/PFC totals into the driver's eth_stats */
	estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
715
/*
 * Fold the EMAC counters (DMAed earlier into mac_stats) into the
 * cumulative port stats (pstats->mac_stx) and the driver's eth_stats.
 * Pause totals are built as xon + xoff for both directions.
 *
 * NOTE: the UPDATE_EXTEND_STAT()/ADD_64() macros reference the locals
 * 'new', 'pstats' and 'estats' by name - do not rename them.
 */
static void
bnx2x_emac_stats_update(struct bnx2x_softc *sc)
{
	struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
	struct bnx2x_eth_stats *estats = &sc->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = xon received + xoff received */
	estats->pause_frames_received_hi =
	    pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
	    pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
	    pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
	    pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
773
/*
 * Aggregate the hardware statistics after a DMAE cycle completes:
 * dispatch to the MAC-specific updater, fold the NIG counters, then
 * copy the port totals into sc->eth_stats.
 *
 * Returns 0 on success, -1 when no MAC is active.
 *
 * NOTE: the UPDATE_STAT64_NIG()/ADD_EXTEND_64() macros reference the
 * locals 'new', 'old', 'estats' and 'diff' by name - do not rename them.
 */
static int
bnx2x_hw_stats_update(struct bnx2x_softc *sc)
{
	struct nig_stats *new = BNX2X_SP(sc, nig_stats);
	struct nig_stats *old = &(sc->port.old_nig_stats);
	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
	struct bnx2x_eth_stats *estats = &sc->eth_stats;
	uint32_t lpi_reg, nig_timer_max;
	/* scratch used by the UPDATE_STAT64_NIG() macro */
	struct {
		uint32_t lo;
		uint32_t hi;
	} diff;

	switch (sc->link_vars.mac_type) {
	case ELINK_MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(sc);
		break;

	case ELINK_MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(sc);
		break;

	case ELINK_MAC_TYPE_UMAC:
	case ELINK_MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(sc);
		break;

	case ELINK_MAC_TYPE_NONE: /* unreached */
		PMD_DRV_LOG(DEBUG,
			    "stats updated by DMAE but no MAC active");
		return -1;

	default: /* unreached */
		PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
		/* NOTE(review): continues into the NIG update below rather
		 * than returning - confirm that is intended here */
	}

	/* NIG BRB counters: accumulate the delta since the last cycle */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(sc)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
				  etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
				  etherstatspktsover1522octets);
	}

	/* remember this cycle's NIG snapshot for the next delta */
	rte_memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC block into eth_stats */
	rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
		   sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(sc)) {
		/* EEE LPI entry counter lives in a per-port CPMU register */
		lpi_reg = (SC_PORT(sc)) ?
			      MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
			      MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
	}

	if (!BNX2X_NOMCP(sc)) {
		nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
				    estats->nig_timer_max);
		}
	}

	return 0;
}
849
/*
 * Verify that all four storm processors (x/u/c/t) have published
 * statistics for the most recent stats ramrod: each per-storm counter
 * (little-endian in firmware memory) must equal the sequence number of
 * the last ramrod sent (stats_counter - 1, since the counter was
 * post-incremented when the ramrod was posted).
 *
 * Returns 0 when all counters match, -EAGAIN when any storm has not
 * caught up yet (caller should retry on the next cycle).
 */
static int
bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
{
	struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
	uint16_t cur_stats_counter;

	/*
	 * Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	cur_stats_counter = (sc->stats_counter - 1);

	/* are storm stats valid? */
	if (le16toh(counters->xstats_counter) != cur_stats_counter) {
		PMD_DRV_LOG(DEBUG,
			    "stats not updated by xstorm, "
			    "counter 0x%x != stats_counter 0x%x",
			    le16toh(counters->xstats_counter), sc->stats_counter);
		return -EAGAIN;
	}

	if (le16toh(counters->ustats_counter) != cur_stats_counter) {
		PMD_DRV_LOG(DEBUG,
			    "stats not updated by ustorm, "
			    "counter 0x%x != stats_counter 0x%x",
			    le16toh(counters->ustats_counter), sc->stats_counter);
		return -EAGAIN;
	}

	if (le16toh(counters->cstats_counter) != cur_stats_counter) {
		PMD_DRV_LOG(DEBUG,
			    "stats not updated by cstorm, "
			    "counter 0x%x != stats_counter 0x%x",
			    le16toh(counters->cstats_counter), sc->stats_counter);
		return -EAGAIN;
	}

	if (le16toh(counters->tstats_counter) != cur_stats_counter) {
		PMD_DRV_LOG(DEBUG,
			    "stats not updated by tstorm, "
			    "counter 0x%x != stats_counter 0x%x",
			    le16toh(counters->tstats_counter), sc->stats_counter);
		return -EAGAIN;
	}

	return 0;
}
897
/*
 * Fold the statistics that the STORM firmware DMAed into host memory
 * (per-queue tstorm/ustorm/xstorm blocks plus the per-PF tstorm block)
 * into the driver's accumulated function and device-wide counters.
 *
 * NOTE: the UPDATE_* / ADD_64* / SUB_* helper macros implicitly
 * reference the local variables declared below by name (estats,
 * estats_old, fstats, tport, tfunc, qstats, qstats_old, tclient,
 * old_tclient, uclient, old_uclient, xclient, old_xclient, diff), so
 * none of those names may be changed.
 *
 * Returns 0 on success, or -EAGAIN when the firmware has not yet
 * completed the current statistics ramrod (PF only; VF stat counters
 * are managed by the PF, so no validation is done for a VF).
 */
static int
bnx2x_storm_stats_update(struct bnx2x_softc *sc)
{
	struct tstorm_per_port_stats *tport =
	    &sc->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
	    &sc->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &sc->func_stats;
	struct bnx2x_eth_stats *estats = &sc->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
	int i;

	/* vfs stat counter is managed by pf */
	if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) {
		return -EAGAIN;
	}

	/* Recomputed from scratch below; start from zero. */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for (i = 0; i < sc->num_queues; i++) {
		struct bnx2x_fastpath *fp = &sc->fp[i];
		struct tstorm_per_queue_stats *tclient =
		    &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_queue_stats *uclient =
		    &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_queue_stats *xclient =
		    &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		uint32_t diff;

		/* RX byte counters per traffic class */
		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
		    qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
		    qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;

		/* RX packet counters and firmware-side discards */
		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts, 32);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

		/*
		 * Packets dropped for lack of host buffers were counted as
		 * received above; subtract them back out, then account them
		 * as no-buffer discards.
		 */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
				 total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
				 total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* TX byte counters per traffic class */
		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
		    qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
		    qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		/* TX packet counters per traffic class */
		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
				    total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
				    total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
				    total_broadcast_packets_transmitted);

		/* RX drop reasons reported by tstorm */
		UPDATE_EXTEND_TSTAT(checksum_discard,
				    total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				    total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				    total_transmitted_dropped_packets_error);

		/* Propagate the per-queue totals into the function stats. */
		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	/* Include MAC-reported bad octets in the device RX byte totals. */
	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* Add firmware-reported error bytes (little-endian in shmem). */
	ADD_64_LE(estats->total_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->total_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	ADD_64_LE(estats->error_bytes_received_hi,
		  tfunc->rcv_error_bytes.hi,
		  estats->error_bytes_received_lo,
		  tfunc->rcv_error_bytes.lo);

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* Port-level FW discard counters are only visible to the PMF. */
	if (sc->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	/* Advance the start/end markers so MFW sees a consistent snapshot. */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	sc->stats_pending = 0;

	return 0;
}
1074
/*
 * Fold the software (driver-maintained) per-queue counters into the
 * device-wide eth_stats block.
 *
 * NOTE: the UPDATE_ESTAT_QSTAT macro implicitly references the locals
 * 'estats', 'qstats' and 'qstats_old' declared below, so those names
 * must not be changed.
 */
static void
bnx2x_drv_stats_update(struct bnx2x_softc *sc)
{
	struct bnx2x_eth_stats *estats = &sc->eth_stats;
	int i;

	for (i = 0; i < sc->num_queues; i++) {
		struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;

		/* RX path software counters */
		UPDATE_ESTAT_QSTAT(rx_calls);
		UPDATE_ESTAT_QSTAT(rx_pkts);
		UPDATE_ESTAT_QSTAT(rx_soft_errors);
		UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
		UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
		UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
		UPDATE_ESTAT_QSTAT(rx_budget_reached);
		/* TX path software counters */
		UPDATE_ESTAT_QSTAT(tx_pkts);
		UPDATE_ESTAT_QSTAT(tx_soft_errors);
		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
		UPDATE_ESTAT_QSTAT(tx_encap_failures);
		UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
		UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
		UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
		UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
		UPDATE_ESTAT_QSTAT(tx_window_violation_std);
		UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
		UPDATE_ESTAT_QSTAT(tx_frames_deferred);
		UPDATE_ESTAT_QSTAT(tx_queue_xoff);

		/* mbuf driver statistics */
		UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
		UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
		UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
		UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);

		/* track the number of allocated mbufs */
		UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
		UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
	}
}
1118
1119 static uint8_t
1120 bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
1121 {
1122 uint32_t val;
1123
1124 if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1125 val = SHMEM2_RD(sc, edebug_driver_if[1]);
1126
1127 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1128 return TRUE;
1129 }
1130 }
1131
1132 return FALSE;
1133 }
1134
/*
 * Periodic statistics refresh (the STATS_EVENT_UPDATE action).
 *
 * PF path: harvest storm stats, post a new HW (DMAE) request and a new
 * storm statistics ramrod, wait 5ms, and bail out if the DMAE completion
 * marker has not been written (the cycle is simply retried on the next
 * event).  PMF devices then fold in the hardware statistics, and the
 * storm stats are harvested again; three consecutive harvest failures
 * are treated as fatal.
 *
 * VF path: no HW statistics and no completions — software update only.
 */
static void
bnx2x_stats_update(struct bnx2x_softc *sc)
{
	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);

	if (bnx2x_edebug_stats_stopped(sc)) {
		return;
	}

	if (IS_PF(sc)) {

		/*
		 * NOTE(review): bnx2x_storm_stats_update() is called twice in
		 * this path and this first call's return value is ignored —
		 * presumably a deliberate best-effort harvest before posting
		 * the new requests; confirm against the upstream driver.
		 */
		bnx2x_storm_stats_update(sc);
		bnx2x_hw_stats_post(sc);
		bnx2x_storm_stats_post(sc);
		DELAY_MS(5);

		/* DMAE not done yet; give up and retry on the next tick. */
		if (*stats_comp != DMAE_COMP_VAL) {
			return;
		}

		if (sc->port.pmf) {
			bnx2x_hw_stats_update(sc);
		}

		if (bnx2x_storm_stats_update(sc)) {
			if (sc->stats_pending++ == 3) {
				rte_panic("storm stats not updated for 3 times");
			}
			return;
		}
	} else {
		/*
		 * VF doesn't collect HW statistics, and doesn't get completions,
		 * performs only update.
		 */
		bnx2x_storm_stats_update(sc);
	}

	bnx2x_drv_stats_update(sc);
}
1175
1176 static void
1177 bnx2x_port_stats_stop(struct bnx2x_softc *sc)
1178 {
1179 struct dmae_command *dmae;
1180 uint32_t opcode;
1181 int loader_idx = PMF_DMAE_C(sc);
1182 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1183
1184 sc->executer_idx = 0;
1185
1186 opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
1187
1188 if (sc->port.port_stx) {
1189 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1190
1191 if (sc->func_stx) {
1192 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1193 } else {
1194 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1195 }
1196
1197 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1198 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
1199 dmae->dst_addr_lo = sc->port.port_stx >> 2;
1200 dmae->dst_addr_hi = 0;
1201 dmae->len = bnx2x_get_port_stats_dma_len(sc);
1202 if (sc->func_stx) {
1203 dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1204 dmae->comp_addr_hi = 0;
1205 dmae->comp_val = 1;
1206 } else {
1207 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1208 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1209 dmae->comp_val = DMAE_COMP_VAL;
1210
1211 *stats_comp = 0;
1212 }
1213 }
1214
1215 if (sc->func_stx) {
1216 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1217 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1218 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
1219 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
1220 dmae->dst_addr_lo = (sc->func_stx >> 2);
1221 dmae->dst_addr_hi = 0;
1222 dmae->len = (sizeof(struct host_func_stats) >> 2);
1223 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1224 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1225 dmae->comp_val = DMAE_COMP_VAL;
1226
1227 *stats_comp = 0;
1228 }
1229 }
1230
1231 static void
1232 bnx2x_stats_stop(struct bnx2x_softc *sc)
1233 {
1234 uint8_t update = FALSE;
1235
1236 bnx2x_stats_comp(sc);
1237
1238 if (sc->port.pmf) {
1239 update = bnx2x_hw_stats_update(sc) == 0;
1240 }
1241
1242 update |= bnx2x_storm_stats_update(sc) == 0;
1243
1244 if (update) {
1245
1246 if (sc->port.pmf) {
1247 bnx2x_port_stats_stop(sc);
1248 }
1249
1250 bnx2x_hw_stats_post(sc);
1251 bnx2x_stats_comp(sc);
1252 }
1253 }
1254
1255 static void
1256 bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
1257 {
1258 return;
1259 }
1260
/*
 * Statistics state machine, indexed as [current state][event].  Each
 * entry names the action to run and the state to transition to.  Row 0
 * is the DISABLED state, row 1 ENABLED; columns follow the event order
 * labeled inline (PMF, LINK_UP, UPDATE, STOP).  Driven exclusively by
 * bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x_softc *sc);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
	{
	/* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
	/* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED },
	/* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
	/* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
	},
	{
	/* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED },
	/* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED },
	/* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED },
	/* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED }
	}
};
1278
1279 void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
1280 {
1281 enum bnx2x_stats_state state;
1282
1283 if (unlikely(sc->panic)) {
1284 return;
1285 }
1286
1287 state = sc->stats_state;
1288 sc->stats_state = bnx2x_stats_stm[state][event].next_state;
1289
1290 bnx2x_stats_stm[state][event].action(sc);
1291
1292 if (event != STATS_EVENT_UPDATE) {
1293 PMD_DRV_LOG(DEBUG,
1294 "state %d -> event %d -> state %d",
1295 state, event, sc->stats_state);
1296 }
1297 }
1298
1299 static void
1300 bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
1301 {
1302 struct dmae_command *dmae;
1303 uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1304
1305 /* sanity */
1306 if (!sc->port.pmf || !sc->port.port_stx) {
1307 PMD_DRV_LOG(ERR, "BUG!");
1308 return;
1309 }
1310
1311 sc->executer_idx = 0;
1312
1313 dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1314 dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1315 TRUE, DMAE_COMP_PCI);
1316 dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1317 dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
1318 dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1319 dmae->dst_addr_hi = 0;
1320 dmae->len = bnx2x_get_port_stats_dma_len(sc);
1321 dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1322 dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1323 dmae->comp_val = DMAE_COMP_VAL;
1324
1325 *stats_comp = 0;
1326 bnx2x_hw_stats_post(sc);
1327 bnx2x_stats_comp(sc);
1328 }
1329
/*
 * Prepare the statistics ramrod request data once, up front, so that at
 * runtime the driver only has to increment the statistics counter and
 * send the ramrod.  Fills in the request header and one query entry for
 * the port, the PF, and each RX/TX queue, each pointing at its slot in
 * the DMA-able fw_stats_data buffer.
 */
static void
bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
{
	int i;
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
	phys_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	stats_hdr->cmd_num = sc->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/*
	 * The storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod.  We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = (sc->fw_stats_data_mapping +
			   offsetof(struct bnx2x_fw_stats_data, storm_counters));

	stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));

	/*
	 * Prepare the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&sc->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = (sc->fw_stats_data_mapping +
			   offsetof(struct bnx2x_fw_stats_data, port));

	cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = SC_PORT(sc);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = htole16(SC_FUNC(sc));
	cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = (sc->fw_stats_data_mapping +
			   offsetof(struct bnx2x_fw_stats_data, pf));

	cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = SC_PORT(sc);
	cur_query_entry->funcID = htole16(SC_FUNC(sc));
	cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

	/**** Clients' queries ****/
	cur_data_offset = (sc->fw_stats_data_mapping +
			   offsetof(struct bnx2x_fw_stats_data, queue_stats));

	/*
	 * First queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
	first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);

	for (i = 0; i < sc->num_queues; i++) {
		cur_query_entry =
		    &sc->fw_stats_req->query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
		cur_query_entry->funcID = htole16(SC_FUNC(sc));
		cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

		/* Each queue's stats slot follows the previous one. */
		cur_data_offset += sizeof(struct per_queue_stats);
	}
}
1417
1418 void bnx2x_memset_stats(struct bnx2x_softc *sc)
1419 {
1420 int i;
1421
1422 /* function stats */
1423 for (i = 0; i < sc->num_queues; i++) {
1424 struct bnx2x_fastpath *fp = &sc->fp[i];
1425
1426 memset(&fp->old_tclient, 0,
1427 sizeof(fp->old_tclient));
1428 memset(&fp->old_uclient, 0,
1429 sizeof(fp->old_uclient));
1430 memset(&fp->old_xclient, 0,
1431 sizeof(fp->old_xclient));
1432 if (sc->stats_init) {
1433 memset(&fp->eth_q_stats, 0,
1434 sizeof(fp->eth_q_stats));
1435 memset(&fp->eth_q_stats_old, 0,
1436 sizeof(fp->eth_q_stats_old));
1437 }
1438 }
1439
1440 if (sc->stats_init) {
1441 memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1442 memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1443 memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1444 memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1445 memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1446 }
1447
1448 sc->stats_state = STATS_STATE_DISABLED;
1449
1450 if (sc->port.pmf && sc->port.port_stx)
1451 bnx2x_port_stats_base_init(sc);
1452
1453 /* mark the end of statistics initializiation */
1454 sc->stats_init = false;
1455 }
1456
/*
 * One-time statistics initialization: read the port/function stats
 * mailbox addresses from shmem, snapshot the NIG baseline counters,
 * clear all software stats, prepare the firmware stats ramrod request,
 * and leave the state machine DISABLED.
 */
void
bnx2x_stats_init(struct bnx2x_softc *sc)
{
	int /*abs*/port = SC_PORT(sc);
	int mb_idx = SC_FW_MB_IDX(sc);
	int i;

	sc->stats_pending = 0;
	sc->executer_idx = 0;
	sc->stats_counter = 0;

	sc->stats_init = TRUE;

	/* port and func stats for management */
	if (!BNX2X_NOMCP(sc)) {
		sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
		sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
	} else {
		/* no MCP: no stats mailboxes to report to */
		sc->port.port_stx = 0;
		sc->func_stx = 0;
	}

	PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
		    sc->port.port_stx, sc->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init*/
	/*
	 * NOTE(review): sc->stats_init was unconditionally set to TRUE a few
	 * lines above, so this condition can never hold here and the PMF
	 * event is never raised from this function — confirm whether that
	 * is intentional (dead code inherited from the base driver).
	 */
	if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
		bnx2x_stats_handle(sc, STATS_EVENT_PMF);
	}

	/* NOTE(review): redundant — 'port' was already set at declaration. */
	port = SC_PORT(sc);
	/* port stats */
	memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
	/* Capture the NIG baseline so later reads can be delta'd. */
	sc->port.old_nig_stats.brb_discard =
	    REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	sc->port.old_nig_stats.brb_truncate =
	    REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(sc)) {
		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}

	/* function stats */
	for (i = 0; i < sc->num_queues; i++) {
		memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
		memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
		memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
		if (sc->stats_init) {
			memset(&sc->fp[i].eth_q_stats, 0,
			       sizeof(sc->fp[i].eth_q_stats));
			memset(&sc->fp[i].eth_q_stats_old, 0,
			       sizeof(sc->fp[i].eth_q_stats_old));
		}
	}

	/* prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(sc);

	if (sc->stats_init) {
		memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
		memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
		memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
		memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
		memset(&sc->func_stats, 0, sizeof(sc->func_stats));

		/* Clean SP from previous statistics */
		if (sc->func_stx) {
			memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
			bnx2x_func_stats_init(sc);
			bnx2x_hw_stats_post(sc);
			bnx2x_stats_comp(sc);
		}
	}

	sc->stats_state = STATS_STATE_DISABLED;

	/* PMF writes the port stats baseline out to the chip. */
	if (sc->port.pmf && sc->port.port_stx) {
		bnx2x_port_stats_base_init(sc);
	}

	/* mark the end of statistics initialization */
	sc->stats_init = FALSE;
}
1542
/*
 * Snapshot the current statistics into the *_old buffers so the
 * accumulated totals survive across a restart of the statistics
 * machinery.
 *
 * NOTE: the UPDATE_QSTAT_OLD and UPDATE_FW_STAT_OLD macros implicitly
 * reference the locals 'qstats'/'qstats_old' and 'estats'/'fwstats'
 * declared below, so those names must not change.
 */
void
bnx2x_save_statistics(struct bnx2x_softc *sc)
{
	int i;

	/* save queue statistics */
	for (i = 0; i < sc->num_queues; i++) {
		struct bnx2x_fastpath *fp = &sc->fp[i];
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
	}

	/* store port firmware statistics */
	if (sc->port.pmf) {
		struct bnx2x_eth_stats *estats = &sc->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
		struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);

		/* PFC frame counters come straight from the port stats buffer. */
		fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
		fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
		fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
		fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;

		/* Multi-function discard counters only exist in MF mode. */
		if (IS_MF(sc)) {
			UPDATE_FW_STAT_OLD(mac_filter_discard);
			UPDATE_FW_STAT_OLD(mf_tag_discard);
			UPDATE_FW_STAT_OLD(brb_truncate_discard);
			UPDATE_FW_STAT_OLD(mac_discard);
		}
	}
}